diff --git a/.github/actions/build-jtreg/action.yml b/.github/actions/build-jtreg/action.yml index a9c046e9dd9..334812e8341 100644 --- a/.github/actions/build-jtreg/action.yml +++ b/.github/actions/build-jtreg/action.yml @@ -37,13 +37,13 @@ runs: - name: 'Check cache for already built JTReg' id: get-cached - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: jtreg/installed key: jtreg-${{ steps.version.outputs.value }} - name: 'Checkout the JTReg source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: repository: openjdk/jtreg ref: jtreg-${{ steps.version.outputs.value }} @@ -61,7 +61,7 @@ runs: if: (steps.get-cached.outputs.cache-hit != 'true') - name: 'Upload JTReg artifact' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: bundles-jtreg-${{ steps.version.outputs.value }} path: jtreg/installed diff --git a/.github/actions/do-build/action.yml b/.github/actions/do-build/action.yml index 6f2c2ce0218..6f6bbdabb68 100644 --- a/.github/actions/do-build/action.yml +++ b/.github/actions/do-build/action.yml @@ -66,7 +66,7 @@ runs: shell: bash - name: 'Upload build logs' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: failure-logs-${{ inputs.platform }}${{ inputs.debug-suffix }} path: failure-logs @@ -74,7 +74,7 @@ runs: # This is the best way I found to abort the job with an error message - name: 'Notify about build failures' - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: core.setFailed('Build failed. 
See summary for details.') if: steps.check.outputs.failure == 'true' diff --git a/.github/actions/get-bootjdk/action.yml b/.github/actions/get-bootjdk/action.yml index 312fb642c82..d531358b7dd 100644 --- a/.github/actions/get-bootjdk/action.yml +++ b/.github/actions/get-bootjdk/action.yml @@ -65,7 +65,7 @@ runs: - name: 'Check cache for BootJDK' id: get-cached-bootjdk - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bootjdk/jdk key: boot-jdk-${{ inputs.platform }}-${{ steps.sha256.outputs.value }} diff --git a/.github/actions/get-bundles/action.yml b/.github/actions/get-bundles/action.yml index a356aa9fd8d..55fa0e842d2 100644 --- a/.github/actions/get-bundles/action.yml +++ b/.github/actions/get-bundles/action.yml @@ -54,14 +54,14 @@ runs: steps: - name: 'Download bundles artifact' id: download-bundles - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }} path: bundles continue-on-error: true - name: 'Download bundles artifact (retry)' - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }} path: bundles @@ -69,7 +69,7 @@ runs: - name: 'Download static bundles artifact' id: download-static-bundles - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}${{ inputs.static-suffix }} path: bundles diff --git a/.github/actions/get-gtest/action.yml b/.github/actions/get-gtest/action.yml index 7a329460a6e..bc53fa2a3b1 100644 --- a/.github/actions/get-gtest/action.yml +++ b/.github/actions/get-gtest/action.yml @@ -40,7 +40,7 @@ runs: var: GTEST_VERSION - name: 'Checkout GTest source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: repository: google/googletest ref: 'v${{ steps.version.outputs.value }}' diff --git a/.github/actions/get-jtreg/action.yml b/.github/actions/get-jtreg/action.yml 
index 36c895fc59d..8c75ae10c7f 100644 --- a/.github/actions/get-jtreg/action.yml +++ b/.github/actions/get-jtreg/action.yml @@ -41,7 +41,7 @@ runs: - name: 'Download JTReg artifact' id: download-jtreg - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: bundles-jtreg-${{ steps.version.outputs.value }} path: jtreg/installed diff --git a/.github/actions/get-msys2/action.yml b/.github/actions/get-msys2/action.yml index 308230ebf2e..7351a120ac4 100644 --- a/.github/actions/get-msys2/action.yml +++ b/.github/actions/get-msys2/action.yml @@ -31,7 +31,7 @@ runs: steps: - name: 'Install MSYS2' id: msys2 - uses: msys2/setup-msys2@v2.28.0 + uses: msys2/setup-msys2@v2.31.0 with: install: 'autoconf tar unzip zip make' path-type: minimal diff --git a/.github/actions/upload-bundles/action.yml b/.github/actions/upload-bundles/action.yml index 78fb0a94bfd..94308002ea7 100644 --- a/.github/actions/upload-bundles/action.yml +++ b/.github/actions/upload-bundles/action.yml @@ -87,7 +87,7 @@ runs: shell: bash - name: 'Upload bundles artifact' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}${{ inputs.static-suffix }}${{ inputs.bundle-suffix }} path: bundles diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000000..d3f63784ece --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,5 @@ + + + +--------- +- [ ] I confirm that I make this contribution in accordance with the [OpenJDK Interim AI Policy](https://openjdk.org/legal/ai). 
diff --git a/.github/workflows/build-alpine-linux.yml b/.github/workflows/build-alpine-linux.yml index c39962fa07f..6863da9016e 100644 --- a/.github/workflows/build-alpine-linux.yml +++ b/.github/workflows/build-alpine-linux.yml @@ -74,7 +74,7 @@ jobs: steps: - name: 'Checkout the JDK source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: 'Install toolchain and dependencies' run: | diff --git a/.github/workflows/build-cross-compile.yml b/.github/workflows/build-cross-compile.yml index a0642d469aa..99b6c40606c 100644 --- a/.github/workflows/build-cross-compile.yml +++ b/.github/workflows/build-cross-compile.yml @@ -94,7 +94,7 @@ jobs: steps: - name: 'Checkout the JDK source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: 'Get the BootJDK' id: bootjdk @@ -122,7 +122,7 @@ jobs: - name: 'Check cache for sysroot' id: get-cached-sysroot - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: sysroot key: sysroot-${{ matrix.debian-arch }}-${{ hashFiles('./.github/workflows/build-cross-compile.yml') }} diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 791b53a3f04..c501670439e 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -84,7 +84,7 @@ jobs: steps: - name: 'Checkout the JDK source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: 'Get the BootJDK' id: bootjdk diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 484e616fad7..435576f4afd 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -75,7 +75,7 @@ jobs: steps: - name: 'Checkout the JDK source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: 'Get the BootJDK' id: bootjdk diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 4dafc016a99..3bb50a137ec 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -83,7 +83,7 @@ 
jobs: steps: - name: 'Checkout the JDK source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: 'Get MSYS2' uses: ./.github/actions/get-msys2 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 85ec75f343c..20be196b128 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -75,7 +75,7 @@ jobs: steps: - name: 'Checkout the scripts' - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: sparse-checkout: | .github diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8f33454305e..b240b42fb97 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -128,7 +128,7 @@ jobs: steps: - name: 'Checkout the JDK source' - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: 'Get MSYS2' uses: ./.github/actions/get-msys2 @@ -239,7 +239,7 @@ jobs: if: always() - name: 'Upload test results' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: path: results name: ${{ steps.package.outputs.artifact-name }} @@ -247,7 +247,7 @@ jobs: # This is the best way I found to abort the job with an error message - name: 'Notify about test failures' - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: core.setFailed('${{ steps.run-tests.outputs.error-message }}') if: steps.run-tests.outputs.failure == 'true' diff --git a/bin/idea.sh b/bin/idea.sh index a184884b61a..d9a18956e3b 100644 --- a/bin/idea.sh +++ b/bin/idea.sh @@ -187,14 +187,18 @@ fi SOURCE_PREFIX="" +# SOURCES is a single string containing embedded newlines. for root in $MODULE_ROOTS; do if [ "x$CYGPATH" != "x" ]; then root=`$CYGPATH -am $root` elif [ "x$WSL_DISTRO_NAME" != "x" ]; then root=`wslpath -am $root` fi - - SOURCES=$SOURCES" $SOURCE_PREFIX""$root""$SOURCE_POSTFIX" + # Add line termination/indentation for everything after the first entry. 
+ if [ "x$SOURCES" != "x" ]; then + SOURCES="${SOURCES}\n " + fi + SOURCES="${SOURCES}${SOURCE_PREFIX}${root}${SOURCE_POSTFIX}" done add_replacement "###SOURCE_ROOTS###" "$SOURCES" diff --git a/doc/testing.html b/doc/testing.html index 195153c8612..c8d0b928bb0 100644 --- a/doc/testing.html +++ b/doc/testing.html @@ -284,9 +284,10 @@ possible, or if you want to use a fully qualified test descriptor, add

Gtest

Note: To be able to run the Gtest suite, you need to configure your build to be able to find a proper version of the gtest -source. For details, see the section "Running Tests" in the build -documentation.

+source. For details, see the section "Running Tests" in the +build documentation (html, markdown).

Since the Hotspot Gtest suite is so quick, the default is to run all tests. This is specified by just gtest, or as a fully qualified test descriptor gtest:all.

diff --git a/doc/testing.md b/doc/testing.md index d0e54aab02b..5f70f2796ad 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -198,8 +198,8 @@ use a fully qualified test descriptor, add `jtreg:`, e.g. **Note:** To be able to run the Gtest suite, you need to configure your build to be able to find a proper version of the gtest source. For details, see the -section ["Running Tests" in the build -documentation](building.html#running-tests). +section **"Running Tests" in the build +documentation** ([html](building.html#running-tests), [markdown](building.md#running-tests)). Since the Hotspot Gtest suite is so quick, the default is to run all tests. This is specified by just `gtest`, or as a fully qualified test descriptor diff --git a/make/CompileInterimLangtools.gmk b/make/CompileInterimLangtools.gmk index c7d1c3796f6..8254b8fc0a7 100644 --- a/make/CompileInterimLangtools.gmk +++ b/make/CompileInterimLangtools.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -68,17 +68,19 @@ java.compiler.interim_EXTRA_FILES := \ TARGETS += $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.compiler.interim/javax/tools/ToolProvider.java ################################################################################ -# Use the up-to-date PreviewFeature.java and NoPreview.java from the current -# sources, instead of the versions from the boot JDK, as javac may be referring -# to constants from the up-to-date versions. +# Create a hybrid PreviewFeature.java that combines constants +# from the current sources, as those can be used in javac APIs, and from the +# bootstrap JDK, as those can be used from bootstrap JDK classfiles. 
-$(eval $(call SetupCopyFiles, COPY_PREVIEW_FEATURES, \ - FILES := $(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java \ - $(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/NoPreview.java, \ - DEST := $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/, \ -)) +$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/PreviewFeature.java: \ + $(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java + $(call LogInfo, Generating $@) + $(JAVA) $(TOPDIR)/make/langtools/tools/previewfeature/SetupPreviewFeature.java \ + $(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java \ + $@ -TARGETS += $(COPY_PREVIEW_FEATURES) + +TARGETS += $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/PreviewFeature.java ################################################################################ # Setup the rules to build interim langtools, which is compiled by the boot @@ -123,7 +125,8 @@ define SetupInterimModule $1_DEPS_INTERIM := $$(addsuffix .interim, $$(filter \ $$(INTERIM_LANGTOOLS_BASE_MODULES), $$(call FindTransitiveDepsForModule, $1))) - $$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d)) $(COPY_PREVIEW_FEATURES) + $$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d)) \ + $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/PreviewFeature.java TARGETS += $$(BUILD_$1.interim) endef diff --git a/make/Docs.gmk b/make/Docs.gmk index a8d40e078e2..9cee8cd40c1 100644 --- a/make/Docs.gmk +++ b/make/Docs.gmk @@ -1,4 +1,4 @@ -# Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -93,19 +93,16 @@ JAVADOC_DISABLED_DOCLINT_WARNINGS := missing JAVADOC_DISABLED_DOCLINT_PACKAGES := org.w3c.* javax.smartcardio # The initial set of options for javadoc -# -XDaccessInternalAPI is a temporary workaround, see 8373909 JAVADOC_OPTIONS := -use -keywords -notimestamp \ -serialwarn -encoding utf-8 -docencoding utf-8 -breakiterator \ -splitIndex --system none -javafx --expand-requires transitive \ - --override-methods=summary \ - -XDaccessInternalAPI + --override-methods=summary # The reference options must stay stable to allow for comparisons across the # development cycle. REFERENCE_OPTIONS := -XDignore.symbol.file=true -use -keywords -notimestamp \ -serialwarn -encoding utf-8 -breakiterator -splitIndex --system none \ - -html5 -javafx --expand-requires transitive \ - -XDaccessInternalAPI + -html5 -javafx --expand-requires transitive # Should we add DRAFT stamps to the generated javadoc? ifeq ($(VERSION_IS_GA), true) diff --git a/make/GenerateLinkOptData.gmk b/make/GenerateLinkOptData.gmk index 6f6e1f29b4c..d615a34e71a 100644 --- a/make/GenerateLinkOptData.gmk +++ b/make/GenerateLinkOptData.gmk @@ -70,12 +70,15 @@ CLASSLIST_FILE_VM_OPTS = \ # Save the stderr output of the command and print it along with stdout in case # something goes wrong. 
+# The classlists must be generated with -Xint to avoid non-determinism +# introduced by JIT compiled code $(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXECUTABLE_SUFFIX) $(CLASSLIST_JAR) $(call MakeDir, $(LINK_OPT_DIR)) $(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $@)) $(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $(JLI_TRACE_FILE))) $(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \ $(CLASSLIST_FILE_VM_OPTS) \ + -Xint \ -Xlog:aot=off \ -Xlog:cds=off \ -cp $(SUPPORT_OUTPUTDIR)/classlist.jar \ @@ -90,6 +93,7 @@ $(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXECUTABLE_SUFFIX) $(CLASSLIST -XX:SharedClassListFile=$@.interim -XX:SharedArchiveFile=$@.jsa \ -Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \ $(CLASSLIST_FILE_VM_OPTS) \ + -Xint \ --module-path $(SUPPORT_OUTPUTDIR)/classlist.jar \ -Xlog:aot=off \ -Xlog:cds=off \ diff --git a/make/Hsdis.gmk b/make/Hsdis.gmk index 469cc488f16..76695fc8dde 100644 --- a/make/Hsdis.gmk +++ b/make/Hsdis.gmk @@ -44,6 +44,9 @@ ifeq ($(HSDIS_BACKEND), capstone) else ifeq ($(call isTargetCpuArch, aarch64), true) CAPSTONE_ARCH := CS_ARCH_$(CAPSTONE_ARCH_AARCH64_NAME) CAPSTONE_MODE := CS_MODE_ARM + else ifeq ($(call isTargetCpuArch, arm), true) + CAPSTONE_ARCH := CS_ARCH_ARM + CAPSTONE_MODE := CS_MODE_ARM else $(error No support for Capstone on this platform) endif diff --git a/make/RunTests.gmk b/make/RunTests.gmk index 02ea632e3ec..d4be5936c41 100644 --- a/make/RunTests.gmk +++ b/make/RunTests.gmk @@ -1020,6 +1020,9 @@ define SetupRunJtregTestBody VM_OPTIONS := $$(JTREG_ALL_OPTIONS) )) $$(call LogWarn, AOT_JDK_CACHE=$$($1_AOT_JDK_CACHE)) $1_JTREG_BASIC_OPTIONS += -vmoption:-XX:AOTCache="$$($1_AOT_JDK_CACHE)" + $1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \ + $$(addprefix $$($1_TEST_ROOT)/, ProblemList-AotJdk.txt) \ + )) endif diff --git a/make/autoconf/basic_tools.m4 b/make/autoconf/basic_tools.m4 index 8e42f9205a4..66ef94d48a8 100644 --- 
a/make/autoconf/basic_tools.m4 +++ b/make/autoconf/basic_tools.m4 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -369,6 +369,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS], IS_GNU_DATE=yes else AC_MSG_RESULT([no]) + # Likely at the AIX provided version of the date utility here, which is not compatible + if test "x$OPENJDK_TARGET_OS" = "xaix"; then + AC_MSG_ERROR([gnu date from AIX toolbox is required]) + fi IS_GNU_DATE=no fi AC_SUBST(IS_GNU_DATE) diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4 index 2d39d84f52e..423654cd50a 100644 --- a/make/autoconf/flags-cflags.m4 +++ b/make/autoconf/flags-cflags.m4 @@ -544,12 +544,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector" TOOLCHAIN_CFLAGS_JDK="-fvisibility=hidden -pipe -fstack-protector" # reduce lib size on linux in link step, this needs also special compile flags - # do this on s390x also for libjvm (where serviceability agent is not supported) if test "x$ENABLE_LINKTIME_GC" = xtrue; then TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -ffunction-sections -fdata-sections" - if test "x$OPENJDK_TARGET_CPU" = xs390x && test "x$DEBUG_LEVEL" == xrelease; then - TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections" - fi + TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections" fi # technically NOT for CXX (but since this gives *worse* performance, use # no-strict-aliasing everywhere!) 
@@ -578,6 +575,11 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], TOOLCHAIN_CFLAGS_JDK_CONLY="-fno-strict-aliasing" # technically NOT for CXX fi + if test "x$ENABLE_LINKTIME_GC" = xtrue; then + TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -ffunction-sections -fdata-sections" + TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections" + fi + if test "x$OPENJDK_TARGET_OS" = xaix; then TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno" TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char" diff --git a/make/autoconf/flags-ldflags.m4 b/make/autoconf/flags-ldflags.m4 index 466ff1beaf4..ff10828731e 100644 --- a/make/autoconf/flags-ldflags.m4 +++ b/make/autoconf/flags-ldflags.m4 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -53,16 +53,15 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER], # add --icf=all (Identical Code Folding — merges identical functions) BASIC_LDFLAGS="-Wl,-z,defs -Wl,-z,relro -Wl,-z,now -Wl,--no-as-needed -Wl,--exclude-libs,ALL" + BASIC_LDFLAGS_JVM_ONLY="" # Linux : remove unused code+data in link step if test "x$ENABLE_LINKTIME_GC" = xtrue; then - if test "x$OPENJDK_TARGET_CPU" = xs390x; then - BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--gc-sections" - else - BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,--gc-sections" - fi + # keep vtables : -Wl,--undefined-glob=_ZTV* (but this seems not to work with gold ld) + # so keep at least the Metadata vtable that is used in the serviceability agent + BASIC_LDFLAGS_JVM_ONLY="$BASIC_LDFLAGS_JVM_ONLY -Wl,--gc-sections -Wl,--undefined=_ZTV8Metadata" + BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,--gc-sections" fi - BASIC_LDFLAGS_JVM_ONLY="" LDFLAGS_LTO="-flto=auto -fuse-linker-plugin 
-fno-strict-aliasing $DEBUG_PREFIX_CFLAGS" LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r" @@ -80,6 +79,10 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER], if test "x$CXX_IS_USER_SUPPLIED" = xfalse && test "x$CC_IS_USER_SUPPLIED" = xfalse; then UTIL_REQUIRE_TOOLCHAIN_PROGS(LLD, lld) fi + + if test "x$ENABLE_LINKTIME_GC" = xtrue; then + BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,--gc-sections" + fi fi if test "x$OPENJDK_TARGET_OS" = xaix; then BASIC_LDFLAGS="-Wl,-b64 -Wl,-brtl -Wl,-bnorwexec -Wl,-blibpath:/usr/lib:lib -Wl,-bnoexpall \ diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4 index 87d147d4f07..89fcbc88521 100644 --- a/make/autoconf/jdk-options.m4 +++ b/make/autoconf/jdk-options.m4 @@ -102,9 +102,20 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS], CHECKING_MSG: [if we should build headless-only (no GUI)]) AC_SUBST(ENABLE_HEADLESS_ONLY) + # Avoid headless-only on macOS and Windows, it is not supported there + if test "x$ENABLE_HEADLESS_ONLY" = xtrue; then + if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then + AC_MSG_ERROR([headless-only is not supported on macOS and Windows]) + fi + fi + # should we linktime gc unused code sections in the JDK build ? 
- if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = xs390x; then - LINKTIME_GC_DEFAULT=true + if test "x$OPENJDK_TARGET_OS" = "xlinux"; then + if test "x$OPENJDK_TARGET_CPU" = "xs390x" || test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then + LINKTIME_GC_DEFAULT=true + else + LINKTIME_GC_DEFAULT=false + fi else LINKTIME_GC_DEFAULT=false fi diff --git a/make/autoconf/toolchain_microsoft.m4 b/make/autoconf/toolchain_microsoft.m4 index f577cf1a2a1..afe04acf029 100644 --- a/make/autoconf/toolchain_microsoft.m4 +++ b/make/autoconf/toolchain_microsoft.m4 @@ -217,10 +217,12 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO_BAT_FILE], TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$TARGET_CPU], [$VS_VERSION], [$PROGRAMFILES_X86/$VS_INSTALL_DIR], [well-known name]) fi + # Derive system drive root from CMD (which is at /windows/system32/cmd.exe) + WINSYSDRIVE_ROOT="$(dirname "$(dirname "$(dirname "$CMD")")")" TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$TARGET_CPU], [$VS_VERSION], - [c:/program files/$VS_INSTALL_DIR], [well-known name]) + [$WINSYSDRIVE_ROOT/program files/$VS_INSTALL_DIR], [well-known name]) TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$TARGET_CPU], [$VS_VERSION], - [c:/program files (x86)/$VS_INSTALL_DIR], [well-known name]) + [$WINSYSDRIVE_ROOT/program files (x86)/$VS_INSTALL_DIR], [well-known name]) if test "x$SDK_INSTALL_DIR" != x; then if test "x$ProgramW6432" != x; then TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$TARGET_CPU], [$VS_VERSION], @@ -235,9 +237,9 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO_BAT_FILE], [$PROGRAMFILES/$SDK_INSTALL_DIR], [well-known name]) fi TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$TARGET_CPU], [$VS_VERSION], - [c:/program files/$SDK_INSTALL_DIR], [well-known name]) + [$WINSYSDRIVE_ROOT/program files/$SDK_INSTALL_DIR], [well-known name]) TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$TARGET_CPU], [$VS_VERSION], - [c:/program files (x86)/$SDK_INSTALL_DIR], [well-known name]) + [$WINSYSDRIVE_ROOT/program files (x86)/$SDK_INSTALL_DIR], 
[well-known name]) fi VCVARS_VER=auto @@ -338,7 +340,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_VISUAL_STUDIO_ENV], OLDPATH="$PATH" # Make sure we only capture additions to PATH needed by VS. # Clear out path, but need system dir present for vsvars cmd file to be able to run - export PATH=$WINENV_PREFIX/c/windows/system32 + export PATH="$(dirname "$CMD")" # The "| cat" is to stop SetEnv.Cmd to mess with system colors on some systems # We can't pass -vcvars_ver=$VCVARS_VER here because cmd.exe eats all '=' # in bat file arguments. :-( diff --git a/make/common/modules/LauncherCommon.gmk b/make/common/modules/LauncherCommon.gmk index 7682ffbb95c..8d45142ef4a 100644 --- a/make/common/modules/LauncherCommon.gmk +++ b/make/common/modules/LauncherCommon.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -35,11 +35,17 @@ include ProcessMarkdown.gmk include $(TOPDIR)/make/ToolsJdk.gmk LAUNCHER_SRC := $(TOPDIR)/src/java.base/share/native/launcher + LAUNCHER_CFLAGS += -I$(TOPDIR)/src/java.base/share/native/launcher \ -I$(TOPDIR)/src/java.base/share/native/libjli \ -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjli \ -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libjli \ # + +ifeq ($(call isTargetOs, aix), true) + LAUNCHER_CFLAGS += -I$(TOPDIR)/src/java.base/aix/native/include +endif + MACOSX_PLIST_DIR := $(TOPDIR)/src/java.base/macosx/native/launcher JAVA_MANIFEST := $(TOPDIR)/src/java.base/windows/native/launcher/java.manifest diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf index ebfc9191535..6771e8923dc 100644 --- a/make/conf/github-actions.conf +++ b/make/conf/github-actions.conf @@ -29,21 +29,21 @@ GTEST_VERSION=1.14.0 JTREG_VERSION=8.2.1+1 
LINUX_X64_BOOT_JDK_EXT=tar.gz -LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_linux-x64_bin.tar.gz -LINUX_X64_BOOT_JDK_SHA256=59cdcaf255add4721de38eb411d4ecfe779356b61fb671aee63c7dec78054c2b +LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_linux-x64_bin.tar.gz +LINUX_X64_BOOT_JDK_SHA256=83c78367f8c81257beef72aca4bbbf8e6dac8ca2b3a4546a85879a09e6e4e128 ALPINE_LINUX_X64_BOOT_JDK_EXT=tar.gz -ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin25-binaries/releases/download/jdk-25%2B36/OpenJDK25U-jdk_x64_alpine-linux_hotspot_25_36.tar.gz -ALPINE_LINUX_X64_BOOT_JDK_SHA256=637e47474d411ed86134f413af7d5fef4180ddb0bf556347b7e74a88cf8904c8 +ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin26-binaries/releases/download/jdk-26%2B35/OpenJDK26U-jdk_x64_alpine-linux_hotspot_26_35.tar.gz +ALPINE_LINUX_X64_BOOT_JDK_SHA256=c105e581fdccb4e7120d889235d1ad8d5b2bed0af4972bc881e0a8ba687c94a4 MACOS_AARCH64_BOOT_JDK_EXT=tar.gz -MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-aarch64_bin.tar.gz -MACOS_AARCH64_BOOT_JDK_SHA256=2006337bf326fdfdf6117081751ba38c1c8706d63419ecac7ff102ff7c776876 +MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_macos-aarch64_bin.tar.gz +MACOS_AARCH64_BOOT_JDK_SHA256=254586bcd1bf6dcd125ad667ac32562cb1e2ab1abf3a61fb117b6fabb571e765 MACOS_X64_BOOT_JDK_EXT=tar.gz -MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-x64_bin.tar.gz -MACOS_X64_BOOT_JDK_SHA256=47482ad9888991ecac9b2bcc131e2b53ff78aff275104cef85f66252308e8a09 +MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_macos-x64_bin.tar.gz 
+MACOS_X64_BOOT_JDK_SHA256=8642b89d889c14ede2c446fd5bbe3621c8a3082e3df02013fd1658e39f52929a WINDOWS_X64_BOOT_JDK_EXT=zip -WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_windows-x64_bin.zip -WINDOWS_X64_BOOT_JDK_SHA256=85bcc178461e2cb3c549ab9ca9dfa73afd54c09a175d6510d0884071867137d3 +WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_windows-x64_bin.zip +WINDOWS_X64_BOOT_JDK_SHA256=2dd2d92c9374cd49a120fe9d916732840bf6bb9f0e0cc29794917a3c08b99c5f diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js index 76a94b7789e..4c1d2835054 100644 --- a/make/conf/jib-profiles.js +++ b/make/conf/jib-profiles.js @@ -387,8 +387,8 @@ var getJibProfilesCommon = function (input, data) { }; }; - common.boot_jdk_version = "25"; - common.boot_jdk_build_number = "37"; + common.boot_jdk_version = "26"; + common.boot_jdk_build_number = "35"; common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-" + common.boot_jdk_version + (input.build_os == "macosx" ? ".jdk/Contents/Home" : ""); diff --git a/make/conf/version-numbers.conf b/make/conf/version-numbers.conf index 4392d86ac33..4f63179ae05 100644 --- a/make/conf/version-numbers.conf +++ b/make/conf/version-numbers.conf @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,6 @@ DEFAULT_VERSION_DATE=2026-09-15 DEFAULT_VERSION_CLASSFILE_MAJOR=71 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`" DEFAULT_VERSION_CLASSFILE_MINOR=0 DEFAULT_VERSION_DOCS_API_SINCE=11 -DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26 27" +DEFAULT_ACCEPTABLE_BOOT_VERSIONS="26 27" DEFAULT_JDK_SOURCE_TARGET_VERSION=27 DEFAULT_PROMOTED_VERSION_PRE=ea diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk index 327014b1e9d..4b21d481049 100644 --- a/make/hotspot/lib/CompileGtest.gmk +++ b/make/hotspot/lib/CompileGtest.gmk @@ -63,6 +63,10 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \ unused-result zero-as-null-pointer-constant, \ DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \ zero-as-null-pointer-constant, \ + $(comment Disable deprecated-declarations warnings to workaround) \ + $(comment clang18+glibc12 bug https://github.com/llvm/llvm-project/issues/76515) \ + $(comment until the clang bug has been fixed) \ + DISABLED_WARNINGS_clang_gtest-all.cc := deprecated-declarations, \ DISABLED_WARNINGS_microsoft := 4530, \ DEFAULT_CFLAGS := false, \ CFLAGS := $(JVM_CFLAGS) \ diff --git a/make/hotspot/lib/JvmFlags.gmk b/make/hotspot/lib/JvmFlags.gmk index 57b632ee532..27a96cc4865 100644 --- a/make/hotspot/lib/JvmFlags.gmk +++ b/make/hotspot/lib/JvmFlags.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -43,10 +43,15 @@ JVM_SRC_DIRS += $(call uniq, $(wildcard $(foreach d, $(JVM_SRC_ROOTS), \ $(JVM_VARIANT_OUTPUTDIR)/gensrc # +ifeq ($(call isTargetOs, aix), true) + ADD_PLATFORM_INCLUDE_DIR := -I$(TOPDIR)/src/java.base/aix/native/include +endif + JVM_CFLAGS_INCLUDES += \ $(patsubst %,-I%,$(JVM_SRC_DIRS)) \ -I$(TOPDIR)/src/hotspot/share/include \ -I$(TOPDIR)/src/hotspot/os/$(HOTSPOT_TARGET_OS_TYPE)/include \ + $(ADD_PLATFORM_INCLUDE_DIR) \ -I$(SUPPORT_OUTPUTDIR)/modules_include/java.base \ -I$(SUPPORT_OUTPUTDIR)/modules_include/java.base/$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \ -I$(TOPDIR)/src/java.base/share/native/libjimage \ diff --git a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java index ab878a4d2a5..9f42326ef09 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java @@ -87,6 +87,7 @@ public class CLDRConverter { static final String EXEMPLAR_CITY_PREFIX = "timezone.excity."; static final String ZONE_NAME_PREFIX = "timezone.displayname."; static final String METAZONE_ID_PREFIX = "metazone.id."; + static final String METAZONE_DSTOFFSET_PREFIX = "metazone.dstoffset."; static final String PARENT_LOCALE_PREFIX = "parentLocale."; static final String LIKELY_SCRIPT_PREFIX = "likelyScript."; static final String META_EMPTY_ZONE_NAME = "EMPTY_ZONE"; @@ -139,6 +140,11 @@ public class CLDRConverter { private static final Map tzdbSubstLetters = HashMap.newHashMap(512); private static final Map tzdbLinks = HashMap.newHashMap(512); + // Map of explicit dst offsets for metazones + // key: time zone ID + // value: explicit dstOffset for the corresponding metazone name + static final Map explicitDstOffsets = HashMap.newHashMap(32); + static enum DraftType { UNCONFIRMED, PROVISIONAL, @@ -795,10 +801,7 @@ public class CLDRConverter 
{ String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid)) .orElse(tzid); // Follow link, if needed - String tzLink = null; - for (var k = tzKey; tzdbLinks.containsKey(k);) { - k = tzLink = tzdbLinks.get(k); - } + String tzLink = getTZDBLink(tzKey); if (tzLink == null && tzdbLinks.containsValue(tzKey)) { // reverse link search // this is needed as in tzdb, "America/Buenos_Aires" links to @@ -827,7 +830,7 @@ public class CLDRConverter { } else { // TZDB short names tznames = Arrays.copyOf(tznames, tznames.length); - fillTZDBShortNames(tzid, tznames); + fillTZDBShortNames(tzKey, tznames); names.put(tzid, tznames); } } else { @@ -840,11 +843,13 @@ public class CLDRConverter { String metaKey = METAZONE_ID_PREFIX + meta; data = map.get(metaKey); if (data instanceof String[] tznames) { - // TZDB short names - tznames = Arrays.copyOf((String[])names.getOrDefault(metaKey, tznames), 6); - fillTZDBShortNames(tzid, tznames); - // Keep the metazone prefix here. - names.putIfAbsent(metaKey, tznames); + if (isDefaultZone(meta, tzKey)) { + // Record the metazone names only from the default + // (001) zone, with short names filled from TZDB + tznames = Arrays.copyOf(tznames, tznames.length); + fillTZDBShortNames(tzKey, tznames); + names.put(metaKey, tznames); + } names.put(tzid, meta); if (tzLink != null && availableIds.contains(tzLink)) { names.put(tzLink, meta); @@ -867,6 +872,12 @@ public class CLDRConverter { .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); names.putAll(exCities); + // Explicit metazone offsets + if (id.equals("root")) { + explicitDstOffsets.forEach((k, v) -> + names.put(METAZONE_DSTOFFSET_PREFIX + k, v)); + } + // If there's no UTC entry at this point, add an empty one if (!names.isEmpty() && !names.containsKey("UTC")) { names.putIfAbsent(METAZONE_ID_PREFIX + META_EMPTY_ZONE_NAME, EMPTY_ZONE); @@ -1492,12 +1503,12 @@ public class CLDRConverter { * Fill the TZDB short names if there is no name provided by the CLDR */ private 
static void fillTZDBShortNames(String tzid, String[] names) { - var val = tzdbShortNamesMap.get(tzdbLinks.getOrDefault(tzid, tzid)); + var val = tzdbShortNamesMap.getOrDefault(tzid, tzdbShortNamesMap.get(getTZDBLink(tzid))); if (val != null) { var format = val.split(NBSP)[0]; var rule = val.split(NBSP)[1]; IntStream.of(1, 3, 5).forEach(i -> { - if (names[i] == null) { + if (names[i] == null || names[i].isEmpty()) { if (format.contains("%s")) { names[i] = switch (i) { case 1 -> format.formatted(tzdbSubstLetters.get(rule + NBSP + STD)); @@ -1519,6 +1530,21 @@ public class CLDRConverter { } } + private static boolean isDefaultZone(String meta, String tzid) { + String zone001 = handlerMetaZones.zidMap().get(meta); + var tzLink = getTZDBLink(tzid); + return canonicalTZMap.getOrDefault(tzid, tzid).equals(zone001) || + tzLink != null && canonicalTZMap.getOrDefault(tzLink, tzLink).equals(zone001); + } + + private static String getTZDBLink(String tzid) { + String tzLink = null; + for (var k = tzid; tzdbLinks.containsKey(k);) { + k = tzLink = tzdbLinks.get(k); + } + return tzLink; + } + /* * Convert TZDB offsets to JDK's offsets, eg, "-08" to "GMT-08:00". * If it cannot recognize the pattern, return the argument as is. diff --git a/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java b/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java index 2c3757b7a47..45de46d2476 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,15 @@ class MetaZonesParseHandler extends AbstractLDMLHandler { if (fromLDT.isBefore(now) && toLDT.isAfter(now)) { metazone = attributes.getValue("mzone"); + + // Explicit metazone DST offsets. Only the "dst" offset is needed, + // as "std" is used by default when it doesn't match. + String dstOffset = attributes.getValue("dstOffset"); + if (dstOffset != null) { + CLDRConverter.explicitDstOffsets.put(tzid, dstOffset); + } } + pushIgnoredContainer(qName); break; diff --git a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java index 3953f38f653..8278bf6bcfa 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -198,7 +198,8 @@ class ResourceBundleGenerator implements BundleGenerator { } else if (value instanceof String) { String valStr = (String)value; if (type == BundleType.TIMEZONE && - !key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) || + !(key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) || + key.startsWith(CLDRConverter.METAZONE_DSTOFFSET_PREFIX)) || valStr.startsWith(META_VALUE_PREFIX)) { out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert(valStr, useJava)); } else { diff --git a/make/jdk/src/classes/build/tools/intpoly/FieldGen.java b/make/jdk/src/classes/build/tools/intpoly/FieldGen.java index fcc45db0219..c29a315f368 100644 --- a/make/jdk/src/classes/build/tools/intpoly/FieldGen.java +++ b/make/jdk/src/classes/build/tools/intpoly/FieldGen.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,36 +34,6 @@ import java.util.*; public class FieldGen { - static FieldParams Curve25519 = new FieldParams( - "IntegerPolynomial25519", 26, 10, 1, 255, - Arrays.asList( - new Term(0, -19) - ), - Curve25519CrSequence(), simpleSmallCrSequence(10) - ); - - private static List Curve25519CrSequence() { - List result = new ArrayList(); - - // reduce(7,2) - result.add(new Reduce(17)); - result.add(new Reduce(18)); - - // carry(8,2) - result.add(new Carry(8)); - result.add(new Carry(9)); - - // reduce(0,7) - for (int i = 10; i < 17; i++) { - result.add(new Reduce(i)); - } - - // carry(0,9) - result.addAll(fullCarry(10)); - - return result; - } - static FieldParams Curve448 = new FieldParams( "IntegerPolynomial448", 28, 16, 1, 448, Arrays.asList( @@ -224,8 +194,7 @@ public class FieldGen { } static final FieldParams[] ALL_FIELDS = { - Curve25519, Curve448, - P256, P384, P521, O256, O384, O521, O25519, O448 + Curve448, P256, P384, P521, O256, O384, O521, O25519, O448 }; public static class Term { diff --git a/make/jdk/src/classes/build/tools/taglet/JSpec.java b/make/jdk/src/classes/build/tools/taglet/JSpec.java index 7e1e0ca215e..196aaccb32b 100644 --- a/make/jdk/src/classes/build/tools/taglet/JSpec.java +++ b/make/jdk/src/classes/build/tools/taglet/JSpec.java @@ -25,13 +25,13 @@ package build.tools.taglet; +import java.net.URI; import java.util.EnumSet; import java.util.List; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.lang.reflect.Field; import javax.lang.model.element.Element; @@ -141,6 +141,11 @@ public class JSpec implements Taglet { @Override public String toString(List tags, Element elem) { + throw new UnsupportedOperationException(); + } + + // @Override - requires JDK-8373922 in build JDK + public String toString(List tags, Element elem, URI docRoot) { if (tags.isEmpty()) return ""; @@ -177,7 +182,7 
@@ public class JSpec implements Taglet { String preview = m.group("preview"); // null if no preview feature String chapter = m.group("chapter"); String section = m.group("section"); - String rootParent = currentPath().replaceAll("[^/]+", ".."); + String rootParent = docRoot.resolve("..").toString(); String url = preview == null ? String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s", @@ -230,23 +235,6 @@ public class JSpec implements Taglet { return sb.toString(); } - private static ThreadLocal CURRENT_PATH = null; - - private String currentPath() { - if (CURRENT_PATH == null) { - try { - Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter") - .getField("CURRENT_PATH"); - @SuppressWarnings("unchecked") - ThreadLocal tl = (ThreadLocal) f.get(null); - CURRENT_PATH = tl; - } catch (ReflectiveOperationException e) { - throw new RuntimeException("Cannot determine current path", e); - } - } - return CURRENT_PATH.get(); - } - private String expand(List trees) { return (new SimpleDocTreeVisitor() { public StringBuilder defaultAction(DocTree tree, StringBuilder sb) { diff --git a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java index 3e93826c180..300999b77c0 100644 --- a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java +++ b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,9 @@ import jdk.javadoc.doclet.Taglet; import javax.lang.model.element.*; import javax.lang.model.type.DeclaredType; +import javax.lang.model.util.Elements; import java.io.IOException; +import java.net.URI; import java.nio.file.Files; import java.nio.file.Path; import java.util.*; @@ -78,6 +80,11 @@ public final class SealedGraph implements Taglet { @Override public String toString(List tags, Element element) { + throw new UnsupportedOperationException(); + } + + // @Override - requires JDK-8373922 in build JDK + public String toString(List tags, Element element, URI docRoot) { if (sealedDotOutputDir == null || sealedDotOutputDir.isEmpty()) { return ""; } @@ -85,9 +92,15 @@ public final class SealedGraph implements Taglet { return ""; } - ModuleElement module = docletEnvironment.getElementUtils().getModuleOf(element); + Elements util = docletEnvironment.getElementUtils(); + ModuleElement module = util.getModuleOf(element); + + // '.' in .DOT file name is converted to '/' in .SVG path, so we use '-' as separator for nested classes. + // module_package.subpackage.Outer-Inner.dot => module/package/subpackage/Outer-Inner-sealed-graph.svg Path dotFile = Path.of(sealedDotOutputDir, - module.getQualifiedName() + "_" + typeElement.getQualifiedName() + ".dot"); + module.getQualifiedName() + "_" + + util.getPackageOf(element).getQualifiedName() + "." 
+ + packagelessCanonicalName(typeElement).replace(".", "-") + ".dot"); Set exports = module.getDirectives().stream() .filter(ModuleElement.ExportsDirective.class::isInstance) @@ -99,7 +112,7 @@ public final class SealedGraph implements Taglet { .map(Objects::toString) .collect(Collectors.toUnmodifiableSet()); - String dotContent = new Renderer().graph(typeElement, exports); + String dotContent = new Renderer().graph(typeElement, exports, docRoot); try { Files.writeString(dotFile, dotContent, WRITE, CREATE, TRUNCATE_EXISTING); @@ -107,8 +120,8 @@ public final class SealedGraph implements Taglet { throw new RuntimeException(e); } - String simpleTypeName = packagelessCanonicalName(typeElement).replace('.', '/'); - String imageFile = simpleTypeName + "-sealed-graph.svg"; + String simpleTypeName = packagelessCanonicalName(typeElement); + String imageFile = simpleTypeName.replace(".", "-") + "-sealed-graph.svg"; int thumbnailHeight = 100; // also appears in the stylesheet String hoverImage = "" + getImage(simpleTypeName, imageFile, -1, true) @@ -137,21 +150,26 @@ public final class SealedGraph implements Taglet { private final class Renderer { // Generates a graph in DOT format - String graph(TypeElement rootClass, Set exports) { - final State state = new State(rootClass); + String graph(TypeElement rootClass, Set exports, URI pathToRoot) { + if (!isInPublicApi(rootClass, exports)) { + // Alternatively we can return "" for the graph since there is no single root to render + throw new IllegalArgumentException("Root not in public API: " + rootClass.getQualifiedName()); + } + final State state = new State(pathToRoot); traverse(state, rootClass, exports); return state.render(); } static void traverse(State state, TypeElement node, Set exports) { + if (!isInPublicApi(node, exports)) { + throw new IllegalArgumentException("Bad request, not in public API: " + node.getQualifiedName()); + } state.addNode(node); if (!(node.getModifiers().contains(Modifier.SEALED) || 
node.getModifiers().contains(Modifier.FINAL))) { state.addNonSealedEdge(node); } else { for (TypeElement subNode : permittedSubclasses(node, exports)) { - if (isInPublicApi(node, exports) && isInPublicApi(subNode, exports)) { - state.addEdge(node, subNode); - } + state.addEdge(node, subNode); traverse(state, subNode, exports); } } @@ -163,7 +181,7 @@ public final class SealedGraph implements Taglet { private static final String TOOLTIP = "tooltip"; private static final String LINK = "href"; - private final TypeElement rootNode; + private final URI pathToRoot; private final StringBuilder builder; @@ -188,8 +206,8 @@ public final class SealedGraph implements Taglet { } } - public State(TypeElement rootNode) { - this.rootNode = rootNode; + public State(URI pathToRoot) { + this.pathToRoot = pathToRoot; nodeStyleMap = new LinkedHashMap<>(); builder = new StringBuilder() .append("digraph G {") @@ -212,24 +230,15 @@ public final class SealedGraph implements Taglet { var styles = nodeStyleMap.computeIfAbsent(id(node), n -> new LinkedHashMap<>()); styles.put(LABEL, new StyleItem.PlainString(node.getSimpleName().toString())); styles.put(TOOLTIP, new StyleItem.PlainString(node.getQualifiedName().toString())); - styles.put(LINK, new StyleItem.PlainString(relativeLink(node))); + styles.put(LINK, new StyleItem.PlainString(pathToRoot.resolve(relativeLink(node)).toString())); } - // A permitted class must be in the same package or in the same module. - // This implies the module is always the same. private String relativeLink(TypeElement node) { var util = SealedGraph.this.docletEnvironment.getElementUtils(); - var nodePackage = util.getPackageOf(node); - // Note: SVG files for nested types use the simple names of containing types as parent directories. - // We therefore need to convert all dots in the qualified name to "../" below. 
- var backNavigator = rootNode.getQualifiedName().toString().chars() - .filter(c -> c == '.') - .mapToObj(c -> "../") - .collect(joining()); - var forwardNavigator = nodePackage.getQualifiedName().toString() - .replace(".", "/"); + var path = util.getModuleOf(node).getQualifiedName().toString() + "/" + + util.getPackageOf(node).getQualifiedName().toString().replace(".", "/"); - return backNavigator + forwardNavigator + "/" + packagelessCanonicalName(node) + ".html"; + return path + "/" + packagelessCanonicalName(node) + ".html"; } public void addEdge(TypeElement node, TypeElement subNode) { @@ -281,25 +290,33 @@ public final class SealedGraph implements Taglet { private String quotedId(TypeElement node) { return "\"" + id(node) + "\""; } - - private String simpleName(String name) { - int lastDot = name.lastIndexOf('.'); - return lastDot < 0 - ? name - : name.substring(lastDot); - } - } private static List permittedSubclasses(TypeElement node, Set exports) { - return node.getPermittedSubclasses().stream() - .filter(DeclaredType.class::isInstance) - .map(DeclaredType.class::cast) - .map(DeclaredType::asElement) - .filter(TypeElement.class::isInstance) - .map(TypeElement.class::cast) - .filter(te -> isInPublicApi(te, exports)) - .toList(); + List dfsStack = new ArrayList().reversed(); // Faster operations to head + SequencedCollection result = new LinkedHashSet<>(); // Deduplicate diamond interface inheritance + // The starting node may be in the public API - still expand it + prependSubclasses(node, dfsStack); + + while (!dfsStack.isEmpty()) { + TypeElement now = dfsStack.removeFirst(); + if (isInPublicApi(now, exports)) { + result.addLast(now); + } else { + // Skip the non-exported classes in the hierarchy + prependSubclasses(now, dfsStack); + } + } + + return List.copyOf(result); + } + + private static void prependSubclasses(TypeElement node, List dfs) { + for (var e : node.getPermittedSubclasses().reversed()) { + if (e instanceof DeclaredType dt && dt.asElement() 
instanceof TypeElement te) { + dfs.addFirst(te); + } + } } private static boolean isInPublicApi(TypeElement typeElement, Set exports) { diff --git a/make/jdk/src/classes/build/tools/taglet/ToolGuide.java b/make/jdk/src/classes/build/tools/taglet/ToolGuide.java index 8db2aee3092..7ad4f6b9b9f 100644 --- a/make/jdk/src/classes/build/tools/taglet/ToolGuide.java +++ b/make/jdk/src/classes/build/tools/taglet/ToolGuide.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,13 +25,13 @@ package build.tools.taglet; +import java.net.URI; import java.util.EnumSet; import java.util.List; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.lang.reflect.Field; import javax.lang.model.element.Element; @@ -91,6 +91,11 @@ public class ToolGuide implements Taglet { @Override public String toString(List tags, Element elem) { + throw new UnsupportedOperationException(); + } + + // @Override - requires JDK-8373922 in build JDK + public String toString(List tags, Element elem, URI docRoot) { if (tags.isEmpty()) return ""; @@ -118,7 +123,7 @@ public class ToolGuide implements Taglet { if (label.isEmpty()) { label = name; } - String rootParent = currentPath().replaceAll("[^/]+", ".."); + String rootParent = docRoot.resolve("..").toString(); String url = String.format("%s/%s/%s.html", rootParent, BASE_URL, name); @@ -141,22 +146,4 @@ public class ToolGuide implements Taglet { return sb.toString(); } - - private static ThreadLocal CURRENT_PATH = null; - - private String currentPath() { - if (CURRENT_PATH == null) { - try { - Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter") - .getField("CURRENT_PATH"); - 
@SuppressWarnings("unchecked") - ThreadLocal tl = (ThreadLocal) f.get(null); - CURRENT_PATH = tl; - } catch (ReflectiveOperationException e) { - throw new RuntimeException("Cannot determine current path", e); - } - } - return CURRENT_PATH.get(); - } - } diff --git a/make/langtools/tools/previewfeature/SetupPreviewFeature.java b/make/langtools/tools/previewfeature/SetupPreviewFeature.java new file mode 100644 index 00000000000..5f9b00edc6d --- /dev/null +++ b/make/langtools/tools/previewfeature/SetupPreviewFeature.java @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package previewfeature; + +import com.sun.source.util.JavacTask; +import com.sun.source.util.Trees; +import java.io.StringWriter; +import java.lang.reflect.Field; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import javax.lang.model.element.ElementKind; +import javax.tools.ToolProvider; + +/* Construct a hybrid PreviewFeature.Feature enum that includes constants both + * from the current JDK sources (so that they can be used in the javac API sources), + * and from the bootstrap JDK (so that they can be used in the bootstrap classfiles). + * + * This hybrid enum is only used for the interim javac. + */ +public class SetupPreviewFeature { + public static void main(String... args) throws Exception { + Class runtimeFeature = Class.forName("jdk.internal.javac.PreviewFeature$Feature"); + Set constantsToAdd = new HashSet<>(); + for (Field runtimeField : runtimeFeature.getDeclaredFields()) { + if (runtimeField.isEnumConstant()) { + constantsToAdd.add(runtimeField.getName()); + } + } + var dummy = new StringWriter(); + var compiler = ToolProvider.getSystemJavaCompiler(); + var source = Path.of(args[0]); + try (var fm = compiler.getStandardFileManager(null, null, null)) { + JavacTask task = + (JavacTask) compiler.getTask(dummy, null, null, null, null, fm.getJavaFileObjects(source)); + task.analyze(); + var sourceFeature = task.getElements() + .getTypeElement("jdk.internal.javac.PreviewFeature.Feature"); + int insertPosition = -1; + for (var el : sourceFeature.getEnclosedElements()) { + if (el.getKind() == ElementKind.ENUM_CONSTANT) { + constantsToAdd.remove(el.getSimpleName().toString()); + if (insertPosition == (-1)) { + var trees = Trees.instance(task); + var elPath = trees.getPath(el); + insertPosition = (int) trees.getSourcePositions() + .getStartPosition(elPath.getCompilationUnit(), + elPath.getLeaf()); + } + } + } + var 
target = Path.of(args[1]); + Files.createDirectories(target.getParent()); + if (constantsToAdd.isEmpty()) { + Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING); + } else { + String sourceCode = Files.readString(source); + try (var out = Files.newBufferedWriter(target)) { + out.write(sourceCode, 0, insertPosition); + out.write(constantsToAdd.stream() + .collect(Collectors.joining(", ", + "/*compatibility constants:*/ ", + ",\n"))); + out.write(sourceCode, insertPosition, sourceCode.length() - insertPosition); + } + } + } + } +} \ No newline at end of file diff --git a/make/modules/java.base/Launcher.gmk b/make/modules/java.base/Launcher.gmk index 3a3920acb12..bfae0925c07 100644 --- a/make/modules/java.base/Launcher.gmk +++ b/make/modules/java.base/Launcher.gmk @@ -95,7 +95,8 @@ ifeq ($(call isTargetOsType, unix), true) CFLAGS := $(VERSION_CFLAGS), \ EXTRA_HEADER_DIRS := libjava, \ EXTRA_OBJECT_FILES := \ - $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc$(OBJ_SUFFIX), \ + $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc$(OBJ_SUFFIX) \ + $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc_errorcodes$(OBJ_SUFFIX), \ LD_SET_ORIGIN := false, \ OUTPUT_DIR := $(SUPPORT_OUTPUTDIR)/modules_libs/$(MODULE), \ )) diff --git a/make/modules/java.desktop/lib/AwtLibraries.gmk b/make/modules/java.desktop/lib/AwtLibraries.gmk index 8b6b50b9e62..887dfab01df 100644 --- a/make/modules/java.desktop/lib/AwtLibraries.gmk +++ b/make/modules/java.desktop/lib/AwtLibraries.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -99,14 +99,16 @@ ifeq ($(call isTargetOs, windows), true) $(TOPDIR)/src/$(MODULE)/windows/native/libawt/windows/awt.rc endif -# This is the object file to provide the dladdr API, which is not -# part of AIX. It occurs several times in the jdk code base. -# Do not include it. When statically linking the java -# launcher with all JDK and VM static libraries, we use the -# --whole-archive linker option. The duplicate objects in different -# static libraries cause linking errors due to duplicate symbols. ifeq ($(call isTargetOs, aix), true) + # This is the object file to provide the dladdr API, which is not + # part of AIX. It occurs several times in the jdk code base. + # Do not include it. When statically linking the java + # launcher with all JDK and VM static libraries, we use the + # --whole-archive linker option. The duplicate objects in different + # static libraries cause linking errors due to duplicate symbols. LIBAWT_STATIC_EXCLUDE_OBJS := porting_aix.o + + LIBAWT_CFLAGS += -I$(TOPDIR)/src/java.base/aix/native/include endif # -fgcse-after-reload improves performance of MaskFill in Java2D by 20% for @@ -423,6 +425,9 @@ endif ifeq ($(call isTargetOs, linux)+$(ENABLE_HEADLESS_ONLY), true+true) LIBJAWT_CFLAGS += -DHEADLESS endif +ifeq ($(call isTargetOs, aix)+$(ENABLE_HEADLESS_ONLY), true+true) + LIBJAWT_CFLAGS += -DHEADLESS +endif ifeq ($(call isTargetOs, windows)+$(call isTargetCpu, x86), true+true) LIBJAWT_LIBS_windows := kernel32.lib diff --git a/make/modules/java.desktop/lib/ClientLibraries.gmk b/make/modules/java.desktop/lib/ClientLibraries.gmk index b76cb8dc4e3..3e37fe79643 100644 --- a/make/modules/java.desktop/lib/ClientLibraries.gmk +++ b/make/modules/java.desktop/lib/ClientLibraries.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. 
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -257,6 +257,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false) DISABLED_WARNINGS_microsoft_dgif_lib.c := 4018 4267, \ DISABLED_WARNINGS_microsoft_splashscreen_impl.c := 4018 4267 4244, \ DISABLED_WARNINGS_microsoft_splashscreen_png.c := 4267, \ + DISABLED_WARNINGS_microsoft_pngread.c := 4146, \ DISABLED_WARNINGS_microsoft_splashscreen_sys.c := 4267 4244, \ LDFLAGS := $(ICONV_LDFLAGS), \ LDFLAGS_windows := -delayload:user32.dll, \ @@ -338,11 +339,8 @@ else # noexcept-type required for GCC 7 builds. Not required for GCC 8+. # expansion-to-defined required for GCC 9 builds. Not required for GCC 10+. # maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+. - # calloc-transposed-args required for GCC 14 builds. (fixed upstream in - # Harfbuzz 032c931e1c0cfb20f18e5acb8ba005775242bd92) HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \ - expansion-to-defined dangling-reference maybe-uninitialized \ - calloc-transposed-args + expansion-to-defined dangling-reference maybe-uninitialized HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers \ range-loop-analysis unused-variable HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244 diff --git a/make/modules/jdk.hotspot.agent/Lib.gmk b/make/modules/jdk.hotspot.agent/Lib.gmk index ed8de631dc3..da02e0dab39 100644 --- a/make/modules/jdk.hotspot.agent/Lib.gmk +++ b/make/modules/jdk.hotspot.agent/Lib.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,12 @@ else LIBSAPROC_LINK_TYPE := C endif +# DWARF related sources would be included on supported platforms only. 
+LIBSAPROC_EXCLUDE_FILES := +ifneq ($(call And, $(call isTargetOs, linux) $(call isTargetCpu, x86_64 aarch64)), true) + LIBSAPROC_EXCLUDE_FILES := DwarfParser.cpp dwarf.cpp +endif + $(eval $(call SetupJdkLibrary, BUILD_LIBSAPROC, \ NAME := saproc, \ LINK_TYPE := $(LIBSAPROC_LINK_TYPE), \ @@ -70,6 +76,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBSAPROC, \ CFLAGS := $(LIBSAPROC_CFLAGS), \ CXXFLAGS := $(LIBSAPROC_CFLAGS) $(LIBSAPROC_CXXFLAGS), \ EXTRA_SRC := $(LIBSAPROC_EXTRA_SRC), \ + EXCLUDE_FILES := $(LIBSAPROC_EXCLUDE_FILES), \ JDK_LIBS := java.base:libjava, \ LIBS_linux := $(LIBDL), \ LIBS_macosx := \ diff --git a/make/modules/jdk.jpackage/Java.gmk b/make/modules/jdk.jpackage/Java.gmk index da66fc14009..1fd4d527217 100644 --- a/make/modules/jdk.jpackage/Java.gmk +++ b/make/modules/jdk.jpackage/Java.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ DISABLED_WARNINGS_java += dangling-doc-comments COPY += .gif .png .txt .spec .script .prerm .preinst \ .postrm .postinst .list .sh .desktop .copyright .control .plist .template \ - .icns .scpt .wxs .wxl .wxi .wxf .ico .bmp .tiff .service .xsl + .icns .scpt .wxs .wxl .wxi .wxf .ico .bmp .tiff .service .xsl .js CLEAN += .properties diff --git a/make/scripts/fixpath.sh b/make/scripts/fixpath.sh index 6a524df4c68..78690f1f2cc 100644 --- a/make/scripts/fixpath.sh +++ b/make/scripts/fixpath.sh @@ -88,7 +88,10 @@ function setup() { fi if [[ -z ${CMD+x} ]]; then - CMD="$DRIVEPREFIX/c/windows/system32/cmd.exe" + CMD="$(type -p cmd.exe 2>/dev/null)" + if [[ -z "$CMD" ]]; then + CMD="$DRIVEPREFIX/c/windows/system32/cmd.exe" + fi fi if [[ -z ${WINTEMP+x} ]]; then diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk index 0482011f561..6774e708f99 100644 --- a/make/test/JtregNativeJdk.gmk +++ b/make/test/JtregNativeJdk.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,8 @@ ifeq ($(call isTargetOs, windows), true) BUILD_JDK_JTREG_EXCLUDE += libDirectIO.c libInheritedChannel.c \ libExplicitAttach.c libImplicitAttach.c \ exelauncher.c libFDLeaker.c exeFDLeakTester.c \ - libChangeSignalDisposition.c exePrintSignalDisposition.c + libChangeSignalDisposition.c exePrintSignalDisposition.c \ + libConcNativeFork.c libPipesCloseOnExec.c BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeNullCallerTest := $(LIBCXX) BUILD_JDK_JTREG_EXECUTABLES_LIBS_exerevokeall := advapi32.lib @@ -77,6 +78,9 @@ else BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLinkerInvokerUnnamed := -pthread BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLinkerInvokerModule := -pthread BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLoaderLookupInvoker := -pthread + BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libConcNativeFork := -pthread + BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libPipesCloseOnExec := -pthread + BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLoaderLookupInvoker := -pthread BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index 9734c6845ea..53fa4e3066c 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved. // Copyright 2025 Arm Limited and/or its affiliates. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -1182,12 +1182,12 @@ class CallStubImpl { public: // Size of call trampoline stub. 
static uint size_call_trampoline() { - return 0; // no call trampolines on this platform + return MacroAssembler::max_trampoline_stub_size(); } // number of relocations needed by a call trampoline stub static uint reloc_call_trampoline() { - return 0; // no call trampolines on this platform + return 5; // metadata; call dest; trampoline address; trampoline destination; trampoline_owner_metadata } }; @@ -2233,15 +2233,9 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const { void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { st->print_cr("# MachUEPNode"); - if (UseCompressedClassPointers) { - st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); - st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass"); - st->print_cr("\tcmpw rscratch1, r10"); - } else { - st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); - st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass"); - st->print_cr("\tcmp rscratch1, r10"); - } + st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); + st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass"); + st->print_cr("\tcmpw rscratch1, r10"); st->print_cr("\tbne, SharedRuntime::_ic_miss_stub"); } #endif @@ -2467,11 +2461,8 @@ bool Matcher::is_generic_vector(MachOper* opnd) { return opnd->opcode() == VREG; } +#ifdef ASSERT // Return whether or not this register is ever used as an argument. -// This function is used on startup to build the trampoline stubs in -// generateOptoStub. Registers not mentioned will be killed by the VM -// call in the trampoline, and arguments in those registers not be -// available to the callee. 
bool Matcher::can_be_java_arg(int reg) { return @@ -2492,11 +2483,7 @@ bool Matcher::can_be_java_arg(int reg) reg == V6_num || reg == V6_H_num || reg == V7_num || reg == V7_H_num; } - -bool Matcher::is_spillable_arg(int reg) -{ - return can_be_java_arg(reg); -} +#endif uint Matcher::int_pressure_limit() { @@ -2531,10 +2518,6 @@ uint Matcher::float_pressure_limit() return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE; } -bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { - return false; -} - const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); return RegMask::EMPTY; @@ -3814,11 +3797,6 @@ frame %{ // Compiled code's Frame Pointer frame_pointer(R31); - // Interpreter stores its frame pointer in a register which is - // stored to the stack by I2CAdaptors. - // I2CAdaptors convert from interpreted java to compiled java. - interpreter_frame_pointer(R29); - // Stack alignment requirement stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) @@ -8036,6 +8014,21 @@ instruct membar_release_lock() %{ ins_pipe(pipe_serial); %} +instruct membar_storeload() %{ + match(MemBarStoreLoad); + ins_cost(VOLATILE_REF_COST*100); + + format %{ "MEMBAR-store-load\n\t" + "dmb ish" %} + + ins_encode %{ + __ block_comment("membar_storeload"); + __ membar(Assembler::StoreLoad); + %} + + ins_pipe(pipe_serial); +%} + instruct unnecessary_membar_volatile() %{ predicate(unnecessary_volatile(n)); match(MemBarVolatile); @@ -8065,6 +8058,20 @@ instruct membar_volatile() %{ ins_pipe(pipe_serial); %} +instruct membar_full() %{ + match(MemBarFull); + ins_cost(VOLATILE_REF_COST*100); + + format %{ "membar_full\n\t" + "dmb ish" %} + ins_encode %{ + __ block_comment("membar_full"); + __ membar(Assembler::AnyAny); + %} + + ins_pipe(pipe_serial); +%} + // ============================================================================ // Cast/Convert Instructions diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad 
b/src/hotspot/cpu/aarch64/aarch64_vector.ad index 19f03d97a72..4c854913e63 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector.ad +++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad @@ -1,6 +1,6 @@ // // Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. -// Copyright (c) 2020, 2025, Arm Limited. All rights reserved. +// Copyright (c) 2020, 2026, Arm Limited. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -247,10 +247,39 @@ source %{ case Op_MinVHF: case Op_MaxVHF: case Op_SqrtVHF: + if (UseSVE == 0 && !is_feat_fp16_supported()) { + return false; + } + break; + // At the time of writing this, the Vector API has no half-float (FP16) species. + // Consequently, AddReductionVHF and MulReductionVHF are only produced by the + // auto-vectorizer, which requires strictly ordered semantics for FP reductions. + // + // There is no direct Neon instruction that performs strictly ordered floating + // point add reduction. Hence, on Neon only machines, the add reduction operation + // is implemented as a scalarized sequence using half-precision scalar instruction + // FADD which requires FEAT_FP16 and ASIMDHP to be available on the target. + // On SVE machines (UseSVE > 0) however, there is a direct instruction (FADDA) which + // implements strictly ordered floating point add reduction which does not require + // the FEAT_FP16 and ASIMDHP checks as SVE supports half-precision floats by default. + case Op_AddReductionVHF: // FEAT_FP16 is enabled if both "fphp" and "asimdhp" features are supported. // Only the Neon instructions need this check. SVE supports half-precision floats // by default. 
- if (UseSVE == 0 && !is_feat_fp16_supported()) { + if (length_in_bytes < 8 || (UseSVE == 0 && !is_feat_fp16_supported())) { + return false; + } + break; + case Op_MulReductionVHF: + // There are no direct Neon/SVE instructions that perform strictly ordered + // floating point multiply reduction. + // For vector length ≤ 16 bytes, the reduction is implemented as a scalarized + // sequence using half-precision scalar instruction FMUL. This path requires + // FEAT_FP16 and ASIMDHP to be available on the target. + // For vector length > 16 bytes, this operation is disabled because there is no + // direct SVE instruction that performs a strictly ordered FP16 multiply + // reduction. + if (length_in_bytes < 8 || length_in_bytes > 16 || !is_feat_fp16_supported()) { return false; } break; @@ -300,6 +329,7 @@ source %{ case Op_VectorRearrange: case Op_MulReductionVD: case Op_MulReductionVF: + case Op_MulReductionVHF: case Op_MulReductionVI: case Op_MulReductionVL: case Op_CompressBitsV: @@ -364,6 +394,7 @@ source %{ case Op_VectorMaskCmp: case Op_LoadVectorGather: case Op_StoreVectorScatter: + case Op_AddReductionVHF: case Op_AddReductionVF: case Op_AddReductionVD: case Op_AndReductionV: @@ -597,13 +628,9 @@ instruct vloadcon(vReg dst, immI0 src) %{ BasicType bt = Matcher::vector_element_basic_type(this); if (UseSVE == 0) { uint length_in_bytes = Matcher::vector_length_in_bytes(this); + int entry_idx = __ vector_iota_entry_index(bt); assert(length_in_bytes <= 16, "must be"); - // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 16. 
- int offset = exact_log2(type2aelembytes(bt)) << 4; - if (is_floating_point_type(bt)) { - offset += 32; - } - __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices() + offset)); + __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices(entry_idx))); if (length_in_bytes == 16) { __ ldrq($dst$$FloatRegister, rscratch1); } else { @@ -3406,6 +3433,44 @@ instruct reduce_non_strict_order_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vR ins_pipe(pipe_slow); %} +// Add Reduction for Half floats (FP16). +// Neon does not provide direct instructions for strictly ordered floating-point add reductions. +// On Neon-only targets (UseSVE = 0), this operation is implemented as a sequence of scalar additions: +// values equal to the vector width are loaded into a vector register, each lane is extracted, +// and its value is accumulated into the running sum, producing a final scalar result. +instruct reduce_addHF_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{ + predicate(UseSVE == 0); + match(Set dst (AddReductionVHF fsrc vsrc)); + effect(TEMP_DEF dst, TEMP tmp); + format %{ "reduce_addHF $dst, $fsrc, $vsrc\t# 4HF/8HF. KILL $tmp" %} + ins_encode %{ + uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc); + __ neon_reduce_add_fp16($dst$$FloatRegister, $fsrc$$FloatRegister, + $vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister); + %} + ins_pipe(pipe_slow); +%} + +// This rule calculates the reduction result in strict order. Two cases will +// reach here: +// 1. Non strictly-ordered AddReductionVHF when vector size > 128-bits. For example - +// AddReductionVHF generated by Vector API. For vector size > 128-bits, it is more +// beneficial performance-wise to generate direct SVE instruction even if it is +// strictly ordered. +// 2. Strictly-ordered AddReductionVHF. For example - AddReductionVHF generated by +// auto-vectorization on SVE machine. 
+instruct reduce_addHF_sve(vRegF dst_src1, vReg src2) %{ + predicate(UseSVE > 0); + match(Set dst_src1 (AddReductionVHF dst_src1 src2)); + format %{ "reduce_addHF_sve $dst_src1, $dst_src1, $src2" %} + ins_encode %{ + uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2); + assert(length_in_bytes == MaxVectorSize, "invalid vector length"); + __ sve_fadda($dst_src1$$FloatRegister, __ H, ptrue, $src2$$FloatRegister); + %} + ins_pipe(pipe_slow); +%} + // This rule calculates the reduction result in strict order. Two cases will // reach here: // 1. Non strictly-ordered AddReductionVF when vector size > 128-bits. For example - @@ -3496,12 +3561,14 @@ instruct reduce_addL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg, vR ins_pipe(pipe_slow); %} -instruct reduce_addF_masked(vRegF dst_src1, vReg src2, pRegGov pg) %{ +instruct reduce_addFHF_masked(vRegF dst_src1, vReg src2, pRegGov pg) %{ predicate(UseSVE > 0); + match(Set dst_src1 (AddReductionVHF (Binary dst_src1 src2) pg)); match(Set dst_src1 (AddReductionVF (Binary dst_src1 src2) pg)); - format %{ "reduce_addF_masked $dst_src1, $pg, $dst_src1, $src2" %} + format %{ "reduce_addFHF_masked $dst_src1, $pg, $dst_src1, $src2" %} ins_encode %{ - __ sve_fadda($dst_src1$$FloatRegister, __ S, + BasicType bt = Matcher::vector_element_basic_type(this, $src2); + __ sve_fadda($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt), $pg$$PRegister, $src2$$FloatRegister); %} ins_pipe(pipe_slow); @@ -3549,14 +3616,17 @@ instruct reduce_mulL(iRegLNoSp dst, iRegL isrc, vReg vsrc) %{ ins_pipe(pipe_slow); %} -instruct reduce_mulF(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{ + +instruct reduce_mulFHF(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{ predicate(Matcher::vector_length_in_bytes(n->in(2)) <= 16); + match(Set dst (MulReductionVHF fsrc vsrc)); match(Set dst (MulReductionVF fsrc vsrc)); effect(TEMP_DEF dst, TEMP tmp); - format %{ "reduce_mulF $dst, $fsrc, $vsrc\t# 2F/4F. 
KILL $tmp" %} + format %{ "reduce_mulFHF $dst, $fsrc, $vsrc\t# 2F/4F/4HF/8HF. KILL $tmp" %} ins_encode %{ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc); - __ neon_reduce_mul_fp($dst$$FloatRegister, T_FLOAT, $fsrc$$FloatRegister, + BasicType bt = Matcher::vector_element_basic_type(this, $vsrc); + __ neon_reduce_mul_fp($dst$$FloatRegister, bt, $fsrc$$FloatRegister, $vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister); %} ins_pipe(pipe_slow); diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 index 48bffb3cf35..58ed234194a 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 +++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 @@ -1,6 +1,6 @@ // // Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. -// Copyright (c) 2020, 2025, Arm Limited. All rights reserved. +// Copyright (c) 2020, 2026, Arm Limited. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -237,10 +237,39 @@ source %{ case Op_MinVHF: case Op_MaxVHF: case Op_SqrtVHF: + if (UseSVE == 0 && !is_feat_fp16_supported()) { + return false; + } + break; + // At the time of writing this, the Vector API has no half-float (FP16) species. + // Consequently, AddReductionVHF and MulReductionVHF are only produced by the + // auto-vectorizer, which requires strictly ordered semantics for FP reductions. + // + // There is no direct Neon instruction that performs strictly ordered floating + // point add reduction. Hence, on Neon only machines, the add reduction operation + // is implemented as a scalarized sequence using half-precision scalar instruction + // FADD which requires FEAT_FP16 and ASIMDHP to be available on the target. 
+ // On SVE machines (UseSVE > 0) however, there is a direct instruction (FADDA) which + // implements strictly ordered floating point add reduction which does not require + // the FEAT_FP16 and ASIMDHP checks as SVE supports half-precision floats by default. + case Op_AddReductionVHF: // FEAT_FP16 is enabled if both "fphp" and "asimdhp" features are supported. // Only the Neon instructions need this check. SVE supports half-precision floats // by default. - if (UseSVE == 0 && !is_feat_fp16_supported()) { + if (length_in_bytes < 8 || (UseSVE == 0 && !is_feat_fp16_supported())) { + return false; + } + break; + case Op_MulReductionVHF: + // There are no direct Neon/SVE instructions that perform strictly ordered + // floating point multiply reduction. + // For vector length ≤ 16 bytes, the reduction is implemented as a scalarized + // sequence using half-precision scalar instruction FMUL. This path requires + // FEAT_FP16 and ASIMDHP to be available on the target. + // For vector length > 16 bytes, this operation is disabled because there is no + // direct SVE instruction that performs a strictly ordered FP16 multiply + // reduction. + if (length_in_bytes < 8 || length_in_bytes > 16 || !is_feat_fp16_supported()) { return false; } break; @@ -290,6 +319,7 @@ source %{ case Op_VectorRearrange: case Op_MulReductionVD: case Op_MulReductionVF: + case Op_MulReductionVHF: case Op_MulReductionVI: case Op_MulReductionVL: case Op_CompressBitsV: @@ -354,6 +384,7 @@ source %{ case Op_VectorMaskCmp: case Op_LoadVectorGather: case Op_StoreVectorScatter: + case Op_AddReductionVHF: case Op_AddReductionVF: case Op_AddReductionVD: case Op_AndReductionV: @@ -2063,6 +2094,25 @@ instruct reduce_non_strict_order_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vR ins_pipe(pipe_slow); %} dnl + +// Add Reduction for Half floats (FP16). +// Neon does not provide direct instructions for strictly ordered floating-point add reductions. 
+// On Neon-only targets (UseSVE = 0), this operation is implemented as a sequence of scalar additions: +// values equal to the vector width are loaded into a vector register, each lane is extracted, +// and its value is accumulated into the running sum, producing a final scalar result. +instruct reduce_addHF_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{ + predicate(UseSVE == 0); + match(Set dst (AddReductionVHF fsrc vsrc)); + effect(TEMP_DEF dst, TEMP tmp); + format %{ "reduce_addHF $dst, $fsrc, $vsrc\t# 4HF/8HF. KILL $tmp" %} + ins_encode %{ + uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc); + __ neon_reduce_add_fp16($dst$$FloatRegister, $fsrc$$FloatRegister, + $vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister); + %} + ins_pipe(pipe_slow); +%} +dnl dnl REDUCE_ADD_FP_SVE($1, $2 ) dnl REDUCE_ADD_FP_SVE(type, size) define(`REDUCE_ADD_FP_SVE', ` @@ -2074,21 +2124,26 @@ define(`REDUCE_ADD_FP_SVE', ` // strictly ordered. // 2. Strictly-ordered AddReductionV$1. For example - AddReductionV$1 generated by // auto-vectorization on SVE machine. 
-instruct reduce_add$1_sve(vReg$1 dst_src1, vReg src2) %{ - predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) || - n->as_Reduction()->requires_strict_order()); +instruct reduce_add$1_sve(vReg`'ifelse($1, HF, F, $1) dst_src1, vReg src2) %{ + ifelse($1, HF, + `predicate(UseSVE > 0);', + `predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) || + n->as_Reduction()->requires_strict_order());') match(Set dst_src1 (AddReductionV$1 dst_src1 src2)); format %{ "reduce_add$1_sve $dst_src1, $dst_src1, $src2" %} ins_encode %{ - assert(UseSVE > 0, "must be sve"); - uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2); + ifelse($1, HF, `', + `assert(UseSVE > 0, "must be sve"); + ')dnl +uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2); assert(length_in_bytes == MaxVectorSize, "invalid vector length"); __ sve_fadda($dst_src1$$FloatRegister, __ $2, ptrue, $src2$$FloatRegister); %} ins_pipe(pipe_slow); %}')dnl dnl -REDUCE_ADD_FP_SVE(F, S) +REDUCE_ADD_FP_SVE(HF, H) +REDUCE_ADD_FP_SVE(F, S) // reduction addD @@ -2129,21 +2184,30 @@ dnl dnl REDUCE_ADD_FP_PREDICATE($1, $2 ) dnl REDUCE_ADD_FP_PREDICATE(insn_name, op_name) define(`REDUCE_ADD_FP_PREDICATE', ` -instruct reduce_add$1_masked(vReg$1 dst_src1, vReg src2, pRegGov pg) %{ +instruct reduce_add$1_masked(vReg$2 dst_src1, vReg src2, pRegGov pg) %{ predicate(UseSVE > 0); - match(Set dst_src1 (AddReductionV$1 (Binary dst_src1 src2) pg)); + ifelse($2, F, + `match(Set dst_src1 (AddReductionVHF (Binary dst_src1 src2) pg)); + match(Set dst_src1 (AddReductionV$2 (Binary dst_src1 src2) pg));', + `match(Set dst_src1 (AddReductionV$2 (Binary dst_src1 src2) pg));') format %{ "reduce_add$1_masked $dst_src1, $pg, $dst_src1, $src2" %} ins_encode %{ - __ sve_fadda($dst_src1$$FloatRegister, __ $2, - $pg$$PRegister, $src2$$FloatRegister); + ifelse($2, F, + `BasicType bt = Matcher::vector_element_basic_type(this, $src2); + ',)dnl +ifelse($2, F, + `__ 
sve_fadda($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt), + $pg$$PRegister, $src2$$FloatRegister);', + `__ sve_fadda($dst_src1$$FloatRegister, __ $2, + $pg$$PRegister, $src2$$FloatRegister);') %} ins_pipe(pipe_slow); %}')dnl dnl REDUCE_ADD_INT_PREDICATE(I, iRegIorL2I) REDUCE_ADD_INT_PREDICATE(L, iRegL) -REDUCE_ADD_FP_PREDICATE(F, S) -REDUCE_ADD_FP_PREDICATE(D, D) +REDUCE_ADD_FP_PREDICATE(FHF, F) +REDUCE_ADD_FP_PREDICATE(D, D) // ------------------------------ Vector reduction mul ------------------------- @@ -2176,30 +2240,37 @@ instruct reduce_mulL(iRegLNoSp dst, iRegL isrc, vReg vsrc) %{ ins_pipe(pipe_slow); %} -instruct reduce_mulF(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{ - predicate(Matcher::vector_length_in_bytes(n->in(2)) <= 16); - match(Set dst (MulReductionVF fsrc vsrc)); +dnl REDUCE_MUL_FP($1, $2 ) +dnl REDUCE_MUL_FP(insn_name, op_name) +define(`REDUCE_MUL_FP', ` +instruct reduce_mul$1(vReg$2 dst, vReg$2 ifelse($2, F, fsrc, dsrc), vReg vsrc, vReg tmp) %{ + predicate(Matcher::vector_length_in_bytes(n->in(2)) ifelse($2, F, <=, ==) 16); + ifelse($2, F, + `match(Set dst (MulReductionVHF fsrc vsrc)); + match(Set dst (MulReductionV$2 fsrc vsrc));', + `match(Set dst (MulReductionV$2 dsrc vsrc));') effect(TEMP_DEF dst, TEMP tmp); - format %{ "reduce_mulF $dst, $fsrc, $vsrc\t# 2F/4F. KILL $tmp" %} + ifelse($2, F, + `format %{ "reduce_mul$1 $dst, $fsrc, $vsrc\t# 2F/4F/4HF/8HF. KILL $tmp" %}', + `format %{ "reduce_mul$1 $dst, $dsrc, $vsrc\t# 2D. 
KILL $tmp" %}') ins_encode %{ - uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc); - __ neon_reduce_mul_fp($dst$$FloatRegister, T_FLOAT, $fsrc$$FloatRegister, - $vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister); + ifelse($2, F, + `uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc); + ',)dnl +ifelse($2, F, + `BasicType bt = Matcher::vector_element_basic_type(this, $vsrc); + ',)dnl +ifelse($2, F, + `__ neon_reduce_mul_fp($dst$$FloatRegister, bt, $fsrc$$FloatRegister, + $vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister);', + `__ neon_reduce_mul_fp($dst$$FloatRegister, T_DOUBLE, $dsrc$$FloatRegister, + $vsrc$$FloatRegister, 16, $tmp$$FloatRegister);') %} ins_pipe(pipe_slow); -%} - -instruct reduce_mulD(vRegD dst, vRegD dsrc, vReg vsrc, vReg tmp) %{ - predicate(Matcher::vector_length_in_bytes(n->in(2)) == 16); - match(Set dst (MulReductionVD dsrc vsrc)); - effect(TEMP_DEF dst, TEMP tmp); - format %{ "reduce_mulD $dst, $dsrc, $vsrc\t# 2D. KILL $tmp" %} - ins_encode %{ - __ neon_reduce_mul_fp($dst$$FloatRegister, T_DOUBLE, $dsrc$$FloatRegister, - $vsrc$$FloatRegister, 16, $tmp$$FloatRegister); - %} - ins_pipe(pipe_slow); -%} +%}')dnl +dnl +REDUCE_MUL_FP(FHF, F) +REDUCE_MUL_FP(D, D) dnl dnl REDUCE_BITWISE_OP_NEON($1, $2 $3 $4 ) diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp index 19b3bb1a65b..4c1c8d9bbc8 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -1000,30 +1000,6 @@ public: f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0); } -#define INSN(NAME, cond) \ - void NAME(address dest) { \ - br(cond, dest); \ - } - - INSN(beq, EQ); - INSN(bne, NE); - INSN(bhs, HS); - INSN(bcs, CS); - INSN(blo, LO); - INSN(bcc, CC); - INSN(bmi, MI); - INSN(bpl, PL); - INSN(bvs, VS); - INSN(bvc, VC); - INSN(bhi, HI); - INSN(bls, LS); - INSN(bge, GE); - INSN(blt, LT); - INSN(bgt, GT); - 
INSN(ble, LE); - INSN(bal, AL); - INSN(bnv, NV); - void br(Condition cc, Label &L); #undef INSN @@ -1095,6 +1071,10 @@ public: #undef INSN + void wfet(Register rt) { + system(0b00, 0b011, 0b0001, 0b0000, 0b000, rt); + } + // we only provide mrs and msr for the special purpose system // registers where op1 (instr[20:19]) == 11 // n.b msr has L (instr[21]) == 0 mrs has L == 1 @@ -3814,8 +3794,8 @@ public: } private: - void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8, - bool isMerge, bool isFloat) { + void _sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8, + bool isMerge, bool isFloat) { starti; assert(T != Q, "invalid size"); int sh = 0; @@ -3839,11 +3819,11 @@ private: public: // SVE copy signed integer immediate to vector elements (predicated) void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8, bool isMerge) { - sve_cpy(Zd, T, Pg, imm8, isMerge, /*isFloat*/false); + _sve_cpy(Zd, T, Pg, imm8, isMerge, /*isFloat*/false); } // SVE copy floating-point immediate to vector elements (predicated) void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, double d) { - sve_cpy(Zd, T, Pg, checked_cast(pack(d)), /*isMerge*/true, /*isFloat*/true); + _sve_cpy(Zd, T, Pg, checked_cast(pack(d)), /*isMerge*/true, /*isFloat*/true); } // SVE conditionally select elements from two vectors diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 30048a2079d..4de6237304d 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -42,6 +42,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/threadIdentifier.hpp" #include "utilities/powerOfTwo.hpp" #include "vmreg_aarch64.inline.hpp" @@ -59,22 +60,6 @@ const Register SHIFT_count = r0; // where count for shift operations must be #define __ _masm-> -static void select_different_registers(Register preserve, - Register extra, - Register &tmp1, - Register &tmp2) { - if (tmp1 == preserve) { - assert_different_registers(tmp1, tmp2, extra); - tmp1 = extra; - } else if (tmp2 == preserve) { - assert_different_registers(tmp1, tmp2, extra); - tmp2 = extra; - } - assert_different_registers(preserve, tmp1, tmp2); -} - - - static void select_different_registers(Register preserve, Register extra, Register &tmp1, @@ -536,6 +521,10 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod #if INCLUDE_CDS if (AOTCodeCache::is_on_for_dump()) { address b = c->as_pointer(); + if (b == (address)ThreadIdentifier::unsafe_offset()) { + __ lea(dest->as_register_lo(), ExternalAddress(b)); + break; + } if (AOTRuntimeConstants::contains(b)) { __ load_aotrc_address(dest->as_register_lo(), b); break; @@ -1269,12 +1258,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L } else if (obj == klass_RInfo) { klass_RInfo = dst; } - if (k->is_loaded() && !UseCompressedClassPointers) { - select_different_registers(obj, dst, k_RInfo, klass_RInfo); - } else { - Rtmp1 = op->tmp3()->as_register(); - select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); - } + + Rtmp1 = op->tmp3()->as_register(); + select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); assert_different_registers(obj, k_RInfo, klass_RInfo); diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp index ad26d494b2d..f10c5197d91 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +++ 
b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -1287,9 +1287,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedClassPointers) { - tmp3 = new_register(objectType); - } + tmp3 = new_register(objectType); __ checkcast(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), info_for_exception, patching_info, stub, @@ -1308,9 +1306,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { } obj.load_item(); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedClassPointers) { - tmp3 = new_register(objectType); - } + tmp3 = new_register(objectType); __ instanceof(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index e934632715c..89a9422ea48 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -105,12 +105,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register } else { mov(t1, checked_cast(markWord::prototype().value())); str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); - if (UseCompressedClassPointers) { // Take care not to kill klass - encode_klass_not_null(t1, klass); - strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); - } else { - str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); - } + encode_klass_not_null(t1, klass); // Take care not to kill klass + strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); } if (len->is_valid()) { @@ -121,7 +117,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register // Clear gap/first 4 bytes following the length field. strw(zr, Address(obj, base_offset)); } - } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { + } else if (!UseCompactObjectHeaders) { store_klass_gap(obj, zr); } } diff --git a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp index 938a64dd399..bb6b3ce907e 100644 --- a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp @@ -42,7 +42,6 @@ define_pd_global(bool, TieredCompilation, false); define_pd_global(intx, CompileThreshold, 1500 ); define_pd_global(intx, OnStackReplacePercentage, 933 ); -define_pd_global(intx, NewSizeThreadIncrease, 4*K ); define_pd_global(size_t, InitialCodeCacheSize, 160*K); define_pd_global(size_t, ReservedCodeCacheSize, 32*M ); define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M ); @@ -52,7 +51,6 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(size_t, CodeCacheExpansionSize, 32*K ); define_pd_global(size_t, CodeCacheMinBlockLength, 1); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(bool, CICompileOSR, true ); #endif // !COMPILER2 define_pd_global(bool, 
UseTypeProfile, false); diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index dc0b9eb9546..3c179f21c14 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright 2026 Arm Limited and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1883,6 +1884,27 @@ void C2_MacroAssembler::neon_reduce_mul_fp(FloatRegister dst, BasicType bt, BLOCK_COMMENT("neon_reduce_mul_fp {"); switch(bt) { + // The T_SHORT type below is for Float16 type which also uses floating-point + // instructions. + case T_SHORT: + fmulh(dst, fsrc, vsrc); + ext(vtmp, T8B, vsrc, vsrc, 2); + fmulh(dst, dst, vtmp); + ext(vtmp, T8B, vsrc, vsrc, 4); + fmulh(dst, dst, vtmp); + ext(vtmp, T8B, vsrc, vsrc, 6); + fmulh(dst, dst, vtmp); + if (isQ) { + ext(vtmp, T16B, vsrc, vsrc, 8); + fmulh(dst, dst, vtmp); + ext(vtmp, T16B, vsrc, vsrc, 10); + fmulh(dst, dst, vtmp); + ext(vtmp, T16B, vsrc, vsrc, 12); + fmulh(dst, dst, vtmp); + ext(vtmp, T16B, vsrc, vsrc, 14); + fmulh(dst, dst, vtmp); + } + break; case T_FLOAT: fmuls(dst, fsrc, vsrc); ins(vtmp, S, vsrc, 0, 1); @@ -1907,6 +1929,33 @@ void C2_MacroAssembler::neon_reduce_mul_fp(FloatRegister dst, BasicType bt, BLOCK_COMMENT("} neon_reduce_mul_fp"); } +// Vector reduction add for half float type with ASIMD instructions. 
+void C2_MacroAssembler::neon_reduce_add_fp16(FloatRegister dst, FloatRegister fsrc, FloatRegister vsrc, + unsigned vector_length_in_bytes, FloatRegister vtmp) { + assert(vector_length_in_bytes == 8 || vector_length_in_bytes == 16, "unsupported"); + bool isQ = vector_length_in_bytes == 16; + + BLOCK_COMMENT("neon_reduce_add_fp16 {"); + faddh(dst, fsrc, vsrc); + ext(vtmp, T8B, vsrc, vsrc, 2); + faddh(dst, dst, vtmp); + ext(vtmp, T8B, vsrc, vsrc, 4); + faddh(dst, dst, vtmp); + ext(vtmp, T8B, vsrc, vsrc, 6); + faddh(dst, dst, vtmp); + if (isQ) { + ext(vtmp, T16B, vsrc, vsrc, 8); + faddh(dst, dst, vtmp); + ext(vtmp, T16B, vsrc, vsrc, 10); + faddh(dst, dst, vtmp); + ext(vtmp, T16B, vsrc, vsrc, 12); + faddh(dst, dst, vtmp); + ext(vtmp, T16B, vsrc, vsrc, 14); + faddh(dst, dst, vtmp); + } + BLOCK_COMMENT("} neon_reduce_add_fp16"); +} + // Helper to select logical instruction void C2_MacroAssembler::neon_reduce_logical_helper(int opc, bool is64, Register Rd, Register Rn, Register Rm, @@ -2414,17 +2463,17 @@ void C2_MacroAssembler::neon_rearrange_hsd(FloatRegister dst, FloatRegister src, break; case T_LONG: case T_DOUBLE: - // Load the iota indices for Long type. The indices are ordered by - // type B/S/I/L/F/D, and the offset between two types is 16; Hence - // the offset for L is 48. - lea(rscratch1, - ExternalAddress(StubRoutines::aarch64::vector_iota_indices() + 48)); - ldrq(tmp, rscratch1); - // Check whether the input "shuffle" is the same with iota indices. - // Return "src" if true, otherwise swap the two elements of "src". - cm(EQ, dst, size2, shuffle, tmp); - ext(tmp, size1, src, src, 8); - bsl(dst, size1, src, tmp); + { + int idx = vector_iota_entry_index(T_LONG); + lea(rscratch1, + ExternalAddress(StubRoutines::aarch64::vector_iota_indices(idx))); + ldrq(tmp, rscratch1); + // Check whether the input "shuffle" is the same with iota indices. + // Return "src" if true, otherwise swap the two elements of "src". 
+ cm(EQ, dst, size2, shuffle, tmp); + ext(tmp, size1, src, src, 8); + bsl(dst, size1, src, tmp); + } break; default: assert(false, "unsupported element type"); @@ -2875,3 +2924,45 @@ void C2_MacroAssembler::vector_expand_sve(FloatRegister dst, FloatRegister src, // dst = 00 87 00 65 00 43 00 21 sve_tbl(dst, size, src, dst); } + +// Optimized SVE cpy (imm, zeroing) instruction. +// +// `movi; cpy(imm, merging)` and `cpy(imm, zeroing)` have the same +// functionality, but test results show that `movi; cpy(imm, merging)` has +// higher throughput on some microarchitectures. This would depend on +// microarchitecture and so may vary between implementations. +void C2_MacroAssembler::sve_cpy(FloatRegister dst, SIMD_RegVariant T, + PRegister pg, int imm8, bool isMerge) { + if (VM_Version::prefer_sve_merging_mode_cpy() && !isMerge) { + // Generates a NEON instruction `movi V.2d, #0`. + // On AArch64, Z and V registers alias in the low 128 bits, so V is + // the low 128 bits of Z. A write to V also clears all bits of + // Z above 128, so this `movi` instruction effectively zeroes the + // entire Z register. According to the Arm Software Optimization + // Guide, `movi` is zero latency. + movi(dst, T2D, 0); + isMerge = true; + } + Assembler::sve_cpy(dst, T, pg, imm8, isMerge); +} + +int C2_MacroAssembler::vector_iota_entry_index(BasicType bt) { + // The vector iota entries array is ordered by type B/S/I/L/F/D, and + // the offset between two types is 16. 
+ switch(bt) { + case T_BYTE: + return 0; + case T_SHORT: + return 1; + case T_INT: + return 2; + case T_LONG: + return 3; + case T_FLOAT: + return 4; + case T_DOUBLE: + return 5; + default: + ShouldNotReachHere(); + } +} diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp index 4f3a41da402..f96d3ffb863 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp @@ -75,6 +75,8 @@ unsigned vector_length_in_bytes); public: + using Assembler::sve_cpy; + // jdk.internal.util.ArraysSupport.vectorizedHashCode address arrays_hashcode(Register ary, Register cnt, Register result, FloatRegister vdata0, FloatRegister vdata1, FloatRegister vdata2, FloatRegister vdata3, @@ -175,6 +177,9 @@ FloatRegister fsrc, FloatRegister vsrc, unsigned vector_length_in_bytes, FloatRegister vtmp); + void neon_reduce_add_fp16(FloatRegister dst, FloatRegister fsrc, FloatRegister vsrc, + unsigned vector_length_in_bytes, FloatRegister vtmp); + void neon_reduce_logical(int opc, Register dst, BasicType bt, Register isrc, FloatRegister vsrc, unsigned vector_length_in_bytes); @@ -244,4 +249,8 @@ void vector_expand_sve(FloatRegister dst, FloatRegister src, PRegister pg, FloatRegister tmp1, FloatRegister tmp2, BasicType bt, int vector_length_in_bytes); + + void sve_cpy(FloatRegister dst, SIMD_RegVariant T, PRegister pg, int imm8, + bool isMerge); + int vector_iota_entry_index(BasicType bt); #endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp index a0dea3643a1..192461d1a61 100644 --- a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp @@ -47,7 +47,6 @@ define_pd_global(intx, ConditionalMoveLimit, 3); define_pd_global(intx, FreqInlineSize, 325); define_pd_global(intx, MinJumpTableSize, 10); define_pd_global(intx, 
InteriorEntryAlignment, 16); -define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K)); define_pd_global(intx, LoopUnrollLimit, 60); define_pd_global(intx, LoopPercentProfileLimit, 10); // InitialCodeCacheSize derived from specjbb2000 run. @@ -75,9 +74,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M ); define_pd_global(size_t, CodeCacheMinBlockLength, 6); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -// Ergonomics related flags -define_pd_global(bool, NeverActAsServerClassMachine, false); - define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed. #endif // CPU_AARCH64_C2_GLOBALS_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp index 6fe3315014b..640cd495383 100644 --- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp @@ -89,16 +89,21 @@ void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeInstruction::instruction_size); + // In AOT "production" run we have mixture of AOTed and normal JITed code. + // Static call stub in AOTed nmethod always has far jump. + // Normal JITed nmethod may have short or far jump depending on distance. + // Determine actual jump instruction we have in code. + address next_instr = method_holder->next_instruction_address(); + bool is_general_jump = nativeInstruction_at(next_instr)->is_general_jump(); + #ifdef ASSERT - NativeJump* jump = MacroAssembler::codestub_branch_needs_far_jump() - ? nativeGeneralJump_at(method_holder->next_instruction_address()) - : nativeJump_at(method_holder->next_instruction_address()); + NativeJump* jump = is_general_jump ? nativeGeneralJump_at(next_instr) : nativeJump_at(next_instr); verify_mt_safe(callee, entry, method_holder, jump); #endif // Update stub. 
method_holder->set_data((intptr_t)callee()); - MacroAssembler::pd_patch_instruction(method_holder->next_instruction_address(), entry); + MacroAssembler::pd_patch_instruction(next_instr, entry); ICache::invalidate_range(stub, to_interp_stub_size()); // Update jump to call. set_destination_mt_safe(stub); diff --git a/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp index 65d448f908c..130d2949800 100644 --- a/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2019, Arm Limited. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -146,10 +146,10 @@ void DowncallLinker::StubGenerator::generate() { bool should_save_return_value = !_needs_return_buffer; RegSpiller out_reg_spiller(_output_registers); - int spill_offset = -1; + int out_spill_offset = -1; if (should_save_return_value) { - spill_offset = 0; + out_spill_offset = 0; // spill area can be shared with shadow space and out args, // since they are only used before the call, // and spill area is only used after. @@ -174,6 +174,9 @@ void DowncallLinker::StubGenerator::generate() { // FP-> | | // |---------------------| = frame_bottom_offset = frame_size // | (optional) | + // | in_reg_spiller area | + // |---------------------| + // | (optional) | // | capture state buf | // |---------------------| = StubLocations::CAPTURED_STATE_BUFFER // | (optional) | @@ -187,6 +190,19 @@ void DowncallLinker::StubGenerator::generate() { GrowableArray out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs); ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, shuffle_reg); + // Need to spill for state capturing runtime call. 
+ // The area spilled into is distinct from the capture state buffer. + RegSpiller in_reg_spiller(out_regs); + int in_spill_offset = -1; + if (_captured_state_mask != 0) { + // The spill area cannot be shared with the out_spill since + // spilling needs to happen before the call. Allocate a new + // region in the stack for this spill space. + in_spill_offset = allocated_frame_size; + allocated_frame_size += in_reg_spiller.spill_size_bytes(); + } + + #ifndef PRODUCT LogTarget(Trace, foreign, downcall) lt; if (lt.is_enabled()) { @@ -228,6 +244,20 @@ void DowncallLinker::StubGenerator::generate() { arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes); __ block_comment("} argument shuffle"); + if (_captured_state_mask != 0) { + assert(in_spill_offset != -1, "must be"); + __ block_comment("{ load initial thread local"); + in_reg_spiller.generate_spill(_masm, in_spill_offset); + + // Copy the contents of the capture state buffer into thread local + __ ldr(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); + __ movw(c_rarg1, _captured_state_mask); + __ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_pre), tmp1); + + in_reg_spiller.generate_fill(_masm, in_spill_offset); + __ block_comment("} load initial thread local"); + } + __ blr(as_Register(locs.get(StubLocations::TARGET_ADDRESS))); // this call is assumed not to have killed rthread @@ -254,15 +284,15 @@ void DowncallLinker::StubGenerator::generate() { __ block_comment("{ save thread local"); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ ldr(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); __ movw(c_rarg1, _captured_state_mask); - __ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state), tmp1); + __ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_post), tmp1); if (should_save_return_value) { - 
out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ block_comment("} save thread local"); @@ -321,7 +351,7 @@ void DowncallLinker::StubGenerator::generate() { if (should_save_return_value) { // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ mov(c_rarg0, rthread); @@ -330,7 +360,7 @@ void DowncallLinker::StubGenerator::generate() { __ blr(tmp1); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ b(L_after_safepoint_poll); @@ -342,13 +372,13 @@ void DowncallLinker::StubGenerator::generate() { __ bind(L_reguard); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), tmp1); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ b(L_after_reguard); diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp index cb53d8663ad..748ab0e0e2b 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -245,8 +245,8 @@ inline bool frame::equal(frame other) const { // Return unique id for this frame. The id must have a value where we can distinguish // identity and younger/older relationship. 
null represents an invalid (incomparable) -// frame. -inline intptr_t* frame::id(void) const { return unextended_sp(); } +// frame. Should not be called for heap frames. +inline intptr_t* frame::id(void) const { return real_fp(); } // Return true if the frame is older (less recent activation) than the frame represented by id inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id"); @@ -412,6 +412,9 @@ inline frame frame::sender(RegisterMap* map) const { StackWatermarkSet::on_iteration(map->thread(), result); } + // Calling frame::id() is currently not supported for heap frames. + assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be"); + return result; } diff --git a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp index 0bfc320179d..7ce4e0f8aed 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,8 +56,10 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d } } -void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) { - +void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2) { + precond(tmp1 != noreg); + precond(tmp2 != noreg); + assert_different_registers(obj, tmp1, tmp2); BarrierSet* bs = BarrierSet::barrier_set(); assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); @@ -65,16 +67,16 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob assert(CardTable::dirty_card_val() == 0, "must be"); - __ load_byte_map_base(rscratch1); + __ load_byte_map_base(tmp1); if (UseCondCardMark) { Label L_already_dirty; - __ ldrb(rscratch2, Address(obj, rscratch1)); - __ cbz(rscratch2, L_already_dirty); - __ strb(zr, Address(obj, rscratch1)); + __ ldrb(tmp2, Address(obj, tmp1)); + __ cbz(tmp2, L_already_dirty); + __ strb(zr, Address(obj, tmp1)); __ bind(L_already_dirty); } else { - __ strb(zr, Address(obj, rscratch1)); + __ strb(zr, Address(obj, tmp1)); } } @@ -112,10 +114,10 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS if (needs_post_barrier) { // flatten object address if needed if (!precise || (dst.index() == noreg && dst.offset() == 0)) { - store_check(masm, dst.base(), dst); + store_check(masm, dst.base(), tmp1, tmp2); } else { __ lea(tmp3, dst); - store_check(masm, tmp3, dst); + store_check(masm, tmp3, tmp1, tmp2); } } } diff --git a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp index 07dd8eb5565..07016381f78 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp +++ 
b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,7 @@ protected: virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); - void store_check(MacroAssembler* masm, Register obj, Address dst); + void store_check(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2); }; #endif // CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp index 4f0977a414f..f0885fee93d 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp @@ -879,7 +879,9 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) { ShouldNotReachHere(); } - ICache::invalidate_word((address)patch_addr); + if (!UseSingleICacheInvalidation) { + ICache::invalidate_word((address)patch_addr); + } } #ifdef COMPILER1 diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp index a59e83c4b69..dfeba73bede 100644 --- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp @@ -95,7 +95,7 @@ define_pd_global(intx, InlineSmallCode, 1000); "Use simplest and shortest implementation for array equals") \ product(bool, UseSIMDForBigIntegerShiftIntrinsics, true, \ "Use SIMD instructions for left/right shift of BigInteger") \ - product(bool, UseSIMDForSHA3Intrinsic, true, \ + product(bool, UseSIMDForSHA3Intrinsic, false, \ 
"Use SIMD SHA3 instructions for SHA3 intrinsic") \ product(bool, AvoidUnalignedAccesses, false, \ "Avoid generating unaligned memory accesses") \ @@ -115,18 +115,26 @@ define_pd_global(intx, InlineSmallCode, 1000); "Value -1 means off.") \ range(-1, 4096) \ product(ccstr, OnSpinWaitInst, "yield", DIAGNOSTIC, \ - "The instruction to use to implement " \ - "java.lang.Thread.onSpinWait()." \ - "Valid values are: none, nop, isb, yield, sb.") \ + "The instruction to use for java.lang.Thread.onSpinWait(). " \ + "Valid values are: none, nop, isb, yield, sb, wfet.") \ constraint(OnSpinWaitInstNameConstraintFunc, AtParse) \ product(uint, OnSpinWaitInstCount, 1, DIAGNOSTIC, \ - "The number of OnSpinWaitInst instructions to generate." \ - "It cannot be used with OnSpinWaitInst=none.") \ + "The number of OnSpinWaitInst instructions to generate. " \ + "It cannot be used with OnSpinWaitInst=none. " \ + "For OnSpinWaitInst=wfet it must be 1.") \ range(1, 99) \ + product(uint, OnSpinWaitDelay, 40, DIAGNOSTIC, \ + "The minimum delay (in nanoseconds) of the OnSpinWait loop. " \ + "It can only be used with -XX:OnSpinWaitInst=wfet.") \ + range(1, 1000) \ product(ccstr, UseBranchProtection, "none", \ "Branch Protection to use: none, standard, pac-ret") \ product(bool, AlwaysMergeDMB, true, DIAGNOSTIC, \ "Always merge DMB instructions in code emission") \ + product(bool, NeoverseN1ICacheErratumMitigation, false, DIAGNOSTIC, \ + "Enable workaround for Neoverse N1 erratum 1542419") \ + product(bool, UseSingleICacheInvalidation, false, DIAGNOSTIC, \ + "Defer multiple ICache invalidation to single invalidation") \ // end of ARCH_FLAGS diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp index 2b506b241e0..980fedb406d 100644 --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -989,26 +989,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) { void InterpreterMacroAssembler::profile_virtual_call(Register receiver, - Register mdp, - bool receiver_can_be_null) { + Register mdp) { if (ProfileInterpreter) { Label profile_continue; // If no method data exists, go to profile_continue. test_method_data_pointer(mdp, profile_continue); - Label skip_receiver_profile; - if (receiver_can_be_null) { - Label not_null; - // We are making a call. Increment the count for null receiver. - increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); - b(skip_receiver_profile); - bind(not_null); - } - // Record the receiver type. profile_receiver_type(receiver, mdp, 0); - bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size())); diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp index 74d4430000d..9a074f1ce69 100644 --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -285,8 +285,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_not_taken_branch(Register mdp); void profile_call(Register mdp); void profile_final_call(Register mdp); - void profile_virtual_call(Register receiver, Register mdp, - bool receiver_can_be_null = false); + void profile_virtual_call(Register receiver, Register mdp); void profile_ret(Register return_bci, Register mdp); void profile_null_seen(Register mdp); void profile_typecheck(Register mdp, Register klass); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 3e3e95be07e..7bec0a3c0ca 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -55,6 +55,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/integerCast.hpp" #include "utilities/powerOfTwo.hpp" #ifdef COMPILER1 #include "c1/c1_LIRAssembler.hpp" @@ -762,7 +763,7 @@ void MacroAssembler::call_VM_base(Register oop_result, assert(java_thread == rthread, "unexpected register"); #ifdef ASSERT // TraceBytecodes does not use r12 but saves it over the call, so don't verify - // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?"); + // if (!TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?"); #endif // ASSERT assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); @@ -952,7 +953,10 @@ void MacroAssembler::emit_static_call_stub() { } int MacroAssembler::static_call_stub_size() { - if (!codestub_branch_needs_far_jump()) { + // During AOT production run AOT and JIT compiled code + // are used at the same time. We need this size + // to be the same for both types of code. 
+ if (!codestub_branch_needs_far_jump() && !AOTCodeCache::is_on_for_use()) { // isb; movk; movz; movz; b return 5 * NativeInstruction::instruction_size; } @@ -1002,14 +1006,10 @@ int MacroAssembler::ic_check(int end_alignment) { load_narrow_klass_compact(tmp1, receiver); ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset())); cmpw(tmp1, tmp2); - } else if (UseCompressedClassPointers) { + } else { ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes())); ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset())); cmpw(tmp1, tmp2); - } else { - ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes())); - ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset())); - cmp(tmp1, tmp2); } Label dont; @@ -2917,7 +2917,11 @@ void MacroAssembler::increment(Address dst, int value) // Push lots of registers in the bit set supplied. Don't push sp. // Return the number of words pushed -int MacroAssembler::push(unsigned int bitset, Register stack) { +int MacroAssembler::push(RegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); int words_pushed = 0; // Scan bitset to accumulate register pairs @@ -2947,7 +2951,11 @@ int MacroAssembler::push(unsigned int bitset, Register stack) { return count; } -int MacroAssembler::pop(unsigned int bitset, Register stack) { +int MacroAssembler::pop(RegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); int words_pushed = 0; // Scan bitset to accumulate register pairs @@ -2979,7 +2987,11 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) { // Push lots of registers in the bit set supplied. Don't push sp. 
// Return the number of dwords pushed -int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { +int MacroAssembler::push_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); int words_pushed = 0; bool use_sve = false; int sve_vector_size_in_bytes = 0; @@ -3092,7 +3104,11 @@ int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode m } // Return the number of dwords popped -int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { +int MacroAssembler::pop_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); int words_pushed = 0; bool use_sve = false; int sve_vector_size_in_bytes = 0; @@ -3202,7 +3218,11 @@ int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mo } // Return the number of dwords pushed -int MacroAssembler::push_p(unsigned int bitset, Register stack) { +int MacroAssembler::push_p(PRegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); bool use_sve = false; int sve_predicate_size_in_slots = 0; @@ -3239,7 +3259,11 @@ int MacroAssembler::push_p(unsigned int bitset, Register stack) { } // Return the number of dwords popped -int MacroAssembler::pop_p(unsigned int bitset, Register stack) { +int MacroAssembler::pop_p(PRegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); bool use_sve = false; int sve_predicate_size_in_slots = 0; @@ -3278,7 +3302,6 @@ int MacroAssembler::pop_p(unsigned int bitset, Register stack) { #ifdef ASSERT void MacroAssembler::verify_heapbase(const char* msg) { #if 0 - assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); assert (Universe::heap() != nullptr, "java heap should be 
initialized"); if (!UseCompressedOops || Universe::ptr_base() == nullptr) { // rheapbase is allocated as general register @@ -3456,7 +3479,7 @@ void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement void MacroAssembler::reinit_heapbase() { if (UseCompressedOops) { - if (Universe::is_fully_initialized()) { + if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) { mov(rheapbase, CompressedOops::base()); } else { lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); @@ -5067,13 +5090,10 @@ void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { void MacroAssembler::load_klass(Register dst, Register src) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(dst, src); - decode_klass_not_null(dst); - } else if (UseCompressedClassPointers) { - ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); - decode_klass_not_null(dst); } else { - ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); + ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); } + decode_klass_not_null(dst); } void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { @@ -5125,25 +5145,22 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) { assert_different_registers(obj, klass, tmp); - if (UseCompressedClassPointers) { - if (UseCompactObjectHeaders) { - load_narrow_klass_compact(tmp, obj); - } else { - ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); - } - if (CompressedKlassPointers::base() == nullptr) { - cmp(klass, tmp, LSL, CompressedKlassPointers::shift()); - return; - } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 - && CompressedKlassPointers::shift() == 0) { - // Only the bottom 32 bits matter - cmpw(klass, tmp); - return; - } - decode_klass_not_null(tmp); + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp, obj); } else 
{ - ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); + ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); } + if (CompressedKlassPointers::base() == nullptr) { + cmp(klass, tmp, LSL, CompressedKlassPointers::shift()); + return; + } else if (!AOTCodeCache::is_on_for_dump() && + ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 + && CompressedKlassPointers::shift() == 0) { + // Only the bottom 32 bits matter + cmpw(klass, tmp); + return; + } + decode_klass_not_null(tmp); cmp(klass, tmp); } @@ -5151,36 +5168,25 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi if (UseCompactObjectHeaders) { load_narrow_klass_compact(tmp1, obj1); load_narrow_klass_compact(tmp2, obj2); - cmpw(tmp1, tmp2); - } else if (UseCompressedClassPointers) { + } else { ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes())); - cmpw(tmp1, tmp2); - } else { - ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); - ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes())); - cmp(tmp1, tmp2); } + cmpw(tmp1, tmp2); } void MacroAssembler::store_klass(Register dst, Register src) { // FIXME: Should this be a store release? concurrent gcs assumes // klass length is valid if klass field is not null. 
assert(!UseCompactObjectHeaders, "not with compact headers"); - if (UseCompressedClassPointers) { - encode_klass_not_null(src); - strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); - } else { - str(src, Address(dst, oopDesc::klass_offset_in_bytes())); - } + encode_klass_not_null(src); + strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); } void MacroAssembler::store_klass_gap(Register dst, Register src) { assert(!UseCompactObjectHeaders, "not with compact headers"); - if (UseCompressedClassPointers) { - // Store to klass gap in destination - strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); - } + // Store to klass gap in destination + strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); } // Algorithm must match CompressedOops::encode. @@ -5326,8 +5332,6 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { } MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) { - assert(UseCompressedClassPointers, "not using compressed class pointers"); - // KlassDecodeMode shouldn't be set already. 
assert(_klass_decode_mode == KlassDecodeNone, "set once"); @@ -5393,7 +5397,7 @@ void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) { } void MacroAssembler::encode_klass_not_null(Register dst, Register src) { - if (AOTCodeCache::is_on_for_dump()) { + if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) { encode_klass_not_null_for_aot(dst, src); return; } @@ -5457,8 +5461,6 @@ void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) { } void MacroAssembler::decode_klass_not_null(Register dst, Register src) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); - if (AOTCodeCache::is_on_for_dump()) { decode_klass_not_null_for_aot(dst, src); return; @@ -5525,7 +5527,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { } void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int index = oop_recorder()->find_index(k); @@ -6835,6 +6836,9 @@ void MacroAssembler::spin_wait() { assert(VM_Version::supports_sb(), "current CPU does not support SB instruction"); sb(); break; + case SpinWait::WFET: + spin_wait_wfet(VM_Version::spin_wait_desc().delay()); + break; default: ShouldNotReachHere(); } @@ -6842,6 +6846,28 @@ void MacroAssembler::spin_wait() { block_comment("}"); } +void MacroAssembler::spin_wait_wfet(int delay_ns) { + // The sequence assumes CNTFRQ_EL0 is fixed to 1GHz. The assumption is valid + // starting from Armv8.6, according to the "D12.1.2 The system counter" of the + // Arm Architecture Reference Manual for A-profile architecture version M.a.a. + // This is sufficient because FEAT_WFXT is introduced from Armv8.6. 
+ Register target = rscratch1; + Register current = rscratch2; + get_cntvctss_el0(current); + add(target, current, delay_ns); + + Label L_wait_loop; + bind(L_wait_loop); + + wfet(target); + get_cntvctss_el0(current); + + cmp(current, target); + br(LT, L_wait_loop); + + sb(); +} + // Stack frame creation/removal void MacroAssembler::enter(bool strip_ret_addr) { diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index fa32f3055b9..a6cc862d05c 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -499,29 +499,20 @@ private: void mov_immediate64(Register dst, uint64_t imm64); void mov_immediate32(Register dst, uint32_t imm32); - int push(unsigned int bitset, Register stack); - int pop(unsigned int bitset, Register stack); - - int push_fp(unsigned int bitset, Register stack, FpPushPopMode mode); - int pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode); - - int push_p(unsigned int bitset, Register stack); - int pop_p(unsigned int bitset, Register stack); - void mov(Register dst, Address a); public: - void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); } - void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); } + int push(RegSet regset, Register stack); + int pop(RegSet regset, Register stack); - void push_fp(FloatRegSet regs, Register stack, FpPushPopMode mode = PushPopFull) { if (regs.bits()) push_fp(regs.bits(), stack, mode); } - void pop_fp(FloatRegSet regs, Register stack, FpPushPopMode mode = PushPopFull) { if (regs.bits()) pop_fp(regs.bits(), stack, mode); } + int push_fp(FloatRegSet regset, Register stack, FpPushPopMode mode = PushPopFull); + int pop_fp(FloatRegSet regset, Register stack, FpPushPopMode mode = PushPopFull); static RegSet call_clobbered_gp_registers(); - void push_p(PRegSet regs, Register stack) { if (regs.bits()) push_p(regs.bits(), stack); } 
- void pop_p(PRegSet regs, Register stack) { if (regs.bits()) pop_p(regs.bits(), stack); } + int push_p(PRegSet regset, Register stack); + int pop_p(PRegSet regset, Register stack); // Push and pop everything that might be clobbered by a native // runtime call except rscratch1 and rscratch2. (They are always @@ -660,6 +651,14 @@ public: msr(0b011, 0b0100, 0b0010, 0b000, reg); } + // CNTVCTSS_EL0: op1 == 011 + // CRn == 1110 + // CRm == 0000 + // op2 == 110 + inline void get_cntvctss_el0(Register reg) { + mrs(0b011, 0b1110, 0b0000, 0b110, reg); + } + // idiv variant which deals with MINLONG as dividend and -1 as divisor int corrected_idivl(Register result, Register ra, Register rb, bool want_remainder, Register tmp = rscratch1); @@ -891,10 +890,6 @@ public: // thread in the default location (rthread) void reset_last_Java_frame(bool clear_fp); - // Stores - void store_check(Register obj); // store check for obj - register is destroyed afterwards - void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) - void resolve_jobject(Register value, Register tmp1, Register tmp2); void resolve_global_jobject(Register value, Register tmp1, Register tmp2); @@ -1724,6 +1719,7 @@ public: // Code for java.lang.Thread::onSpinWait() intrinsic. 
void spin_wait(); + void spin_wait_wfet(int delay_ns); void fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow); void fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow); diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp index fc7274714ad..ab9896fa426 100644 --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp @@ -97,7 +97,7 @@ protected: #define MACOS_WX_WRITE MACOS_AARCH64_ONLY(os::thread_wx_enable_write()) void set_char_at(int offset, char c) { MACOS_WX_WRITE; *addr_at(offset) = (u_char)c; } void set_int_at(int offset, jint i) { MACOS_WX_WRITE; *(jint*)addr_at(offset) = i; } - void set_uint_at(int offset, jint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; } + void set_uint_at(int offset, juint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; } void set_ptr_at(int offset, address ptr) { MACOS_WX_WRITE; *(address*)addr_at(offset) = ptr; } void set_oop_at(int offset, oop o) { MACOS_WX_WRITE; *(oop*)addr_at(offset) = o; } #undef MACOS_WX_WRITE @@ -178,13 +178,11 @@ public: address destination() const; void set_destination(address dest) { - int offset = dest - instruction_address(); - unsigned int insn = 0b100101 << 26; + int64_t offset = dest - instruction_address(); + juint insn = 0b100101u << 26u; assert((offset & 3) == 0, "should be"); - offset >>= 2; - offset &= (1 << 26) - 1; // mask off insn part - insn |= offset; - set_int_at(displacement_offset, insn); + Instruction_aarch64::spatch(reinterpret_cast<address>
(&insn), 25, 0, offset >> 2); + set_uint_at(displacement_offset, insn); } void verify_alignment() { ; } diff --git a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp index dbec2d76d4f..f1b9fb213a2 100644 --- a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp @@ -54,7 +54,12 @@ void Relocation::pd_set_data_value(address x, bool verify_only) { bytes = MacroAssembler::pd_patch_instruction_size(addr(), x); break; } - ICache::invalidate_range(addr(), bytes); + + if (UseSingleICacheInvalidation) { + assert(_binding != nullptr, "expect to be called with RelocIterator in use"); + } else { + ICache::invalidate_range(addr(), bytes); + } } address Relocation::pd_call_destination(address orig_addr) { diff --git a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp index e36aa21b567..638e57b03fe 100644 --- a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp @@ -290,7 +290,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); const char* name = OptoRuntime::stub_name(StubId::c2_exception_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)BlobId::c2_exception_id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, BlobId::c2_exception_id); if (blob != nullptr) { return blob->as_exception_blob(); } diff --git a/src/hotspot/cpu/aarch64/spin_wait_aarch64.cpp b/src/hotspot/cpu/aarch64/spin_wait_aarch64.cpp index 7da0151d834..97a981ab815 100644 --- a/src/hotspot/cpu/aarch64/spin_wait_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/spin_wait_aarch64.cpp @@ -32,6 +32,7 @@ bool SpinWait::supports(const char *name) { strcmp(name, "isb") == 0 || strcmp(name, "yield") == 0 || strcmp(name, "sb") == 0 || + strcmp(name, "wfet") == 0 || strcmp(name, "none") == 0); } @@ -46,6 +47,8 @@ 
SpinWait::Inst SpinWait::from_name(const char* name) { return SpinWait::YIELD; } else if (strcmp(name, "sb") == 0) { return SpinWait::SB; + } else if (strcmp(name, "wfet") == 0) { + return SpinWait::WFET; } return SpinWait::NONE; diff --git a/src/hotspot/cpu/aarch64/spin_wait_aarch64.hpp b/src/hotspot/cpu/aarch64/spin_wait_aarch64.hpp index 0e96a4b7157..6ebcd2477a8 100644 --- a/src/hotspot/cpu/aarch64/spin_wait_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/spin_wait_aarch64.hpp @@ -24,6 +24,8 @@ #ifndef CPU_AARCH64_SPIN_WAIT_AARCH64_HPP #define CPU_AARCH64_SPIN_WAIT_AARCH64_HPP +#include "utilities/debug.hpp" + class SpinWait { public: enum Inst { @@ -31,21 +33,30 @@ public: NOP, ISB, YIELD, - SB + SB, + WFET }; private: Inst _inst; int _count; + int _delay; Inst from_name(const char *name); public: - SpinWait(Inst inst = NONE, int count = 0) : _inst(inst), _count(inst == NONE ? 0 : count) {} - SpinWait(const char *name, int count) : SpinWait(from_name(name), count) {} + SpinWait(Inst inst = NONE, int count = 0, int delay = -1) + : _inst(inst), _count(inst == NONE ? 
0 : count), _delay(delay) {} + SpinWait(const char *name, int count, int delay) + : SpinWait(from_name(name), count, delay) {} Inst inst() const { return _inst; } int inst_count() const { return _count; } + int delay() const { + assert(_inst == WFET, "Specifying the delay value is only supported for WFET"); + assert(_delay > 0, "The delay value must be positive"); + return _delay; + } static bool supports(const char *name); }; diff --git a/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp b/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp index 695534604b8..d1f59e479db 100644 --- a/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp @@ -29,32 +29,39 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 0) \ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 10000) \ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 2000) \ +// count needed for declaration of vector_iota_indices stub +#define VECTOR_IOTA_COUNT 6 #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 70000) \ do_stub(compiler, vector_iota_indices) \ - do_arch_entry(aarch64, compiler, vector_iota_indices, \ - vector_iota_indices, vector_iota_indices) \ + do_arch_entry_array(aarch64, compiler, vector_iota_indices, \ + vector_iota_indices, vector_iota_indices, \ + VECTOR_IOTA_COUNT) \ do_stub(compiler, large_array_equals) \ do_arch_entry(aarch64, compiler, large_array_equals, \ large_array_equals, large_array_equals) \ @@ 
-84,8 +91,7 @@ do_stub(compiler, count_positives) \ do_arch_entry(aarch64, compiler, count_positives, count_positives, \ count_positives) \ - do_stub(compiler, count_positives_long) \ - do_arch_entry(aarch64, compiler, count_positives_long, \ + do_arch_entry(aarch64, compiler, count_positives, \ count_positives_long, count_positives_long) \ do_stub(compiler, compare_long_string_LL) \ do_arch_entry(aarch64, compiler, compare_long_string_LL, \ @@ -108,14 +114,16 @@ do_stub(compiler, string_indexof_linear_ul) \ do_arch_entry(aarch64, compiler, string_indexof_linear_ul, \ string_indexof_linear_ul, string_indexof_linear_ul) \ - /* this uses the entry for ghash_processBlocks */ \ - do_stub(compiler, ghash_processBlocks_wide) \ + do_stub(compiler, ghash_processBlocks_small) \ + do_arch_entry(aarch64, compiler, ghash_processBlocks_small, \ + ghash_processBlocks_small, ghash_processBlocks_small) \ #define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 20000 ZGC_ONLY(+85000)) \ do_stub(final, copy_byte_f) \ do_arch_entry(aarch64, final, copy_byte_f, copy_byte_f, \ @@ -139,9 +147,49 @@ do_stub(final, spin_wait) \ do_arch_entry_init(aarch64, final, spin_wait, spin_wait, \ spin_wait, empty_spin_wait) \ - /* stub only -- entries are not stored in StubRoutines::aarch64 */ \ /* n.b. 
these are not the same as the generic atomic stubs */ \ do_stub(final, atomic_entry_points) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_fetch_add_4_impl, atomic_fetch_add_4_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_fetch_add_8_impl, atomic_fetch_add_8_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_fetch_add_4_relaxed_impl, \ + atomic_fetch_add_4_relaxed_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_fetch_add_8_relaxed_impl, \ + atomic_fetch_add_8_relaxed_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_xchg_4_impl, atomic_xchg_4_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_xchg_8_impl, atomic_xchg_8_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_1_impl, atomic_cmpxchg_1_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_4_impl, atomic_cmpxchg_4_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_8_impl, atomic_cmpxchg_8_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_1_relaxed_impl, \ + atomic_cmpxchg_1_relaxed_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_4_relaxed_impl, \ + atomic_cmpxchg_4_relaxed_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_8_relaxed_impl, \ + atomic_cmpxchg_8_relaxed_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_4_release_impl, \ + atomic_cmpxchg_4_release_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_8_release_impl, \ + atomic_cmpxchg_8_release_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_4_seq_cst_impl, \ + atomic_cmpxchg_4_seq_cst_impl) \ + do_arch_entry(aarch64, final, atomic_entry_points, \ + atomic_cmpxchg_8_seq_cst_impl, \ + atomic_cmpxchg_8_seq_cst_impl) \ #endif // 
CPU_AARCH64_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index a459a28b09e..fddb37b7b8d 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -79,6 +79,166 @@ #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") +// Constant data definitions + +static const uint32_t _sha256_round_consts[64] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, +}; + +static const uint64_t _sha512_round_consts[80] = { + 0x428A2F98D728AE22L, 0x7137449123EF65CDL, 0xB5C0FBCFEC4D3B2FL, + 0xE9B5DBA58189DBBCL, 0x3956C25BF348B538L, 0x59F111F1B605D019L, + 0x923F82A4AF194F9BL, 0xAB1C5ED5DA6D8118L, 0xD807AA98A3030242L, + 0x12835B0145706FBEL, 0x243185BE4EE4B28CL, 0x550C7DC3D5FFB4E2L, + 0x72BE5D74F27B896FL, 0x80DEB1FE3B1696B1L, 0x9BDC06A725C71235L, + 0xC19BF174CF692694L, 0xE49B69C19EF14AD2L, 0xEFBE4786384F25E3L, + 0x0FC19DC68B8CD5B5L, 0x240CA1CC77AC9C65L, 0x2DE92C6F592B0275L, + 0x4A7484AA6EA6E483L, 0x5CB0A9DCBD41FBD4L, 0x76F988DA831153B5L, + 0x983E5152EE66DFABL, 0xA831C66D2DB43210L, 0xB00327C898FB213FL, + 0xBF597FC7BEEF0EE4L, 0xC6E00BF33DA88FC2L, 0xD5A79147930AA725L, + 0x06CA6351E003826FL, 
0x142929670A0E6E70L, 0x27B70A8546D22FFCL, + 0x2E1B21385C26C926L, 0x4D2C6DFC5AC42AEDL, 0x53380D139D95B3DFL, + 0x650A73548BAF63DEL, 0x766A0ABB3C77B2A8L, 0x81C2C92E47EDAEE6L, + 0x92722C851482353BL, 0xA2BFE8A14CF10364L, 0xA81A664BBC423001L, + 0xC24B8B70D0F89791L, 0xC76C51A30654BE30L, 0xD192E819D6EF5218L, + 0xD69906245565A910L, 0xF40E35855771202AL, 0x106AA07032BBD1B8L, + 0x19A4C116B8D2D0C8L, 0x1E376C085141AB53L, 0x2748774CDF8EEB99L, + 0x34B0BCB5E19B48A8L, 0x391C0CB3C5C95A63L, 0x4ED8AA4AE3418ACBL, + 0x5B9CCA4F7763E373L, 0x682E6FF3D6B2B8A3L, 0x748F82EE5DEFB2FCL, + 0x78A5636F43172F60L, 0x84C87814A1F0AB72L, 0x8CC702081A6439ECL, + 0x90BEFFFA23631E28L, 0xA4506CEBDE82BDE9L, 0xBEF9A3F7B2C67915L, + 0xC67178F2E372532BL, 0xCA273ECEEA26619CL, 0xD186B8C721C0C207L, + 0xEADA7DD6CDE0EB1EL, 0xF57D4F7FEE6ED178L, 0x06F067AA72176FBAL, + 0x0A637DC5A2C898A6L, 0x113F9804BEF90DAEL, 0x1B710B35131C471BL, + 0x28DB77F523047D84L, 0x32CAAB7B40C72493L, 0x3C9EBE0A15C9BEBCL, + 0x431D67C49C100D4CL, 0x4CC5D4BECB3E42B6L, 0x597F299CFC657E2AL, + 0x5FCB6FAB3AD6FAECL, 0x6C44198C4A475817L +}; + +static const uint64_t _sha3_round_consts[24] = { + 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL, + 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L, + 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL, + 0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL, + 0x000000008000808BL, 0x800000000000008BL, 0x8000000000008089L, + 0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, + 0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L, + 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L +}; + +static const uint64_t _double_keccak_round_consts[24] = { + 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL, + 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L, + 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL, + 0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL, + 0x000000008000808BL, 
0x800000000000008BL, 0x8000000000008089L, + 0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, + 0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L, + 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L +}; + +static const char _encodeBlock_toBase64[64] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' +}; + +static const char _encodeBlock_toBase64URL[64] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_' +}; + +// Non-SIMD lookup tables are mostly dumped from fromBase64 array used in java.util.Base64, +// except the trailing character '=' is also treated illegal value in this intrinsic. That +// is java.util.Base64.fromBase64['='] = -2, while fromBase(URL)64ForNoSIMD['='] = 255 here. 
+static const uint8_t _decodeBlock_fromBase64ForNoSIMD[256] = { + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, 255u, 63u, + 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, 14u, + 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, 255u, + 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, 40u, + 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, +}; + +static const uint8_t _decodeBlock_fromBase64URLForNoSIMD[256] = { + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, + 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 
255u, 255u, + 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, 14u, + 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, 63u, + 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, 40u, + 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, +}; + +// A legal value of base64 code is in range [0, 127]. We need two lookups +// with tbl/tbx and combine them to get the decode data. The 1st table vector +// lookup use tbl, out of range indices are set to 0 in destination. The 2nd +// table vector lookup use tbx, out of range indices are unchanged in +// destination. Input [64..126] is mapped to index [65, 127] in second lookup. +// The value of index 64 is set to 0, so that we know that we already get the +// decoded data with the 1st lookup. 
+static const uint8_t _decodeBlock_fromBase64ForSIMD[128] = { + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, 255u, 63u, + 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, + 0u, 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, + 14u, 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, + 255u, 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, + 40u, 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, +}; + +static const uint8_t _decodeBlock_fromBase64URLForSIMD[128] = { + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, + 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, + 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, + 0u, 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, + 14u, 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, + 63u, 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, + 40u, 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, +}; + + // Stub Code definitions class StubGenerator: public StubCodeGenerator { @@ -203,8 +363,17 @@ class StubGenerator: public StubCodeGenerator { "adjust this code"); StubId stub_id = StubId::stubgen_call_stub_id; + GrowableArray
<address> entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 2, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == 1, "expected 1 extra entry"); + return_address = entries.at(0); + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Address sp_after_call (rfp, sp_after_call_off * wordSize); @@ -323,6 +492,7 @@ class StubGenerator: public StubCodeGenerator { // save current address for use by exception handling code return_address = __ pc(); + entries.append(return_address); // store result depending on type (everything that is not // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) @@ -406,6 +576,9 @@ class StubGenerator: public StubCodeGenerator { __ strd(j_farg0, Address(j_rarg2, 0)); __ br(Assembler::AL, exit); + // record the stub entry and end plus the auxiliary entry + store_archive_data(stub_id, start, __ pc(), &entries); + return start; } @@ -423,8 +596,14 @@ class StubGenerator: public StubCodeGenerator { address generate_catch_exception() { StubId stub_id = StubId::stubgen_catch_exception_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // same as in generate_call_stub(): const Address sp_after_call(rfp, sp_after_call_off * wordSize); @@ -450,7 +629,9 @@ class StubGenerator: public StubCodeGenerator { __ verify_oop(r0); __ str(r0, Address(rthread, Thread::pending_exception_offset())); - __ mov(rscratch1, (address)__FILE__); + // special case -- add file name string to AOT address table + address file = (address)AOTCodeCache::add_C_string(__FILE__); + __ lea(rscratch1, ExternalAddress(file)); __ str(rscratch1, Address(rthread, Thread::exception_file_offset())); __ 
movw(rscratch1, (int)__LINE__); __ strw(rscratch1, Address(rthread, Thread::exception_line_offset())); @@ -458,7 +639,10 @@ class StubGenerator: public StubCodeGenerator { // complete return to VM assert(StubRoutines::_call_stub_return_address != nullptr, "_call_stub_return_address must have been generated before"); - __ b(StubRoutines::_call_stub_return_address); + __ b(RuntimeAddress(StubRoutines::_call_stub_return_address)); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); return start; } @@ -479,8 +663,14 @@ class StubGenerator: public StubCodeGenerator { address generate_forward_exception() { StubId stub_id = StubId::stubgen_forward_exception_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Upon entry, LR points to the return address returning into // Java (interpreted or compiled) code; i.e., the return address @@ -551,6 +741,9 @@ class StubGenerator: public StubCodeGenerator { __ verify_oop(r0); __ br(r19); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -569,8 +762,14 @@ class StubGenerator: public StubCodeGenerator { // [tos + 5]: saved rscratch1 address generate_verify_oop() { StubId stub_id = StubId::stubgen_verify_oop_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label exit, error; @@ -613,33 +812,64 @@ class StubGenerator: public StubCodeGenerator { __ blr(rscratch1); __ hlt(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } // Generate indices for iota vector. 
- address generate_iota_indices(StubId stub_id) { + void generate_iota_indices(StubId stub_id) { + GrowableArray<address>
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == VECTOR_IOTA_COUNT, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == entry_count - 1, + "unexpected entries count %d", entries.length()); + StubRoutines::aarch64::_vector_iota_indices[0] = start; + for (int i = 1; i < VECTOR_IOTA_COUNT; i++) { + StubRoutines::aarch64::_vector_iota_indices[i] = entries.at(i - 1); + } + return; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // B __ emit_data64(0x0706050403020100, relocInfo::none); __ emit_data64(0x0F0E0D0C0B0A0908, relocInfo::none); + entries.append(__ pc()); // H __ emit_data64(0x0003000200010000, relocInfo::none); __ emit_data64(0x0007000600050004, relocInfo::none); + entries.append(__ pc()); // S __ emit_data64(0x0000000100000000, relocInfo::none); __ emit_data64(0x0000000300000002, relocInfo::none); + entries.append(__ pc()); // D __ emit_data64(0x0000000000000000, relocInfo::none); __ emit_data64(0x0000000000000001, relocInfo::none); + entries.append(__ pc()); // S - FP __ emit_data64(0x3F80000000000000, relocInfo::none); // 0.0f, 1.0f __ emit_data64(0x4040000040000000, relocInfo::none); // 2.0f, 3.0f + entries.append(__ pc()); // D - FP __ emit_data64(0x0000000000000000, relocInfo::none); // 0.0d __ emit_data64(0x3FF0000000000000, relocInfo::none); // 1.0d - return start; + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc(), &entries); + + // install the entry addresses in the entry array + assert(entries.length() == entry_count - 1, + "unexpected entries count %d", entries.length()); + StubRoutines::aarch64::_vector_iota_indices[0] = start; + for (int i = 1; i < VECTOR_IOTA_COUNT; i++) { + StubRoutines::aarch64::_vector_iota_indices[i] = entries.at(i - 1); + } } // The inner part of zero_words(). 
This is the bulk operation, @@ -656,15 +886,21 @@ class StubGenerator: public StubCodeGenerator { // r11 < MacroAssembler::zero_words_block_size. address generate_zero_blocks() { + StubId stub_id = StubId::stubgen_zero_blocks_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); + StubCodeMark mark(this, stub_id); Label done; Label base_aligned; Register base = r10, cnt = r11; - __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_zero_blocks_id; - StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); if (UseBlockZeroing) { int zva_length = VM_Version::zva_length(); @@ -707,6 +943,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -803,6 +1042,12 @@ class StubGenerator: public StubCodeGenerator { // s and d are adjusted to point to the remaining words to copy // address generate_copy_longs(StubId stub_id, DecoratorSet decorators, Register s, Register d, Register count) { + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } BasicType type; copy_direction direction; @@ -854,7 +1099,7 @@ class StubGenerator: public StubCodeGenerator { StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label unaligned_copy_long; if (AvoidUnalignedAccesses) { @@ -1154,6 +1399,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); } + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1445,19 +1693,25 @@ class StubGenerator: public StubCodeGenerator { } if (direction == copy_forwards) { if (type != T_OBJECT) { - __ 
bl(StubRoutines::aarch64::copy_byte_f()); + __ lea(rscratch1, RuntimeAddress(StubRoutines::aarch64::copy_byte_f())); + __ blr(rscratch1); } else if ((decorators & IS_DEST_UNINITIALIZED) != 0) { - __ bl(StubRoutines::aarch64::copy_oop_uninit_f()); + __ lea(rscratch1, RuntimeAddress(StubRoutines::aarch64::copy_oop_uninit_f())); + __ blr(rscratch1); } else { - __ bl(StubRoutines::aarch64::copy_oop_f()); + __ lea(rscratch1, RuntimeAddress(StubRoutines::aarch64::copy_oop_f())); + __ blr(rscratch1); } } else { if (type != T_OBJECT) { - __ bl(StubRoutines::aarch64::copy_byte_b()); + __ lea(rscratch1, RuntimeAddress(StubRoutines::aarch64::copy_byte_b())); + __ blr(rscratch1); } else if ((decorators & IS_DEST_UNINITIALIZED) != 0) { - __ bl(StubRoutines::aarch64::copy_oop_uninit_b()); + __ lea(rscratch1, RuntimeAddress(StubRoutines::aarch64::copy_oop_uninit_b())); + __ blr(rscratch1); } else { - __ bl(StubRoutines::aarch64::copy_oop_b()); + __ lea(rscratch1, RuntimeAddress(StubRoutines::aarch64::copy_oop_b())); + __ blr(rscratch1); } } @@ -1508,8 +1762,8 @@ class StubGenerator: public StubCodeGenerator { // stub_id - is used to name the stub and identify all details of // how to perform the copy. // - // entry - is assigned to the stub's post push entry point unless - // it is null + // nopush_entry - is assigned to the stub's post push entry point + // unless it is null // // Inputs: // c_rarg0 - source array address @@ -1525,8 +1779,6 @@ class StubGenerator: public StubCodeGenerator { // copy method // address generate_disjoint_copy(StubId stub_id, address *nopush_entry) { - Register s = c_rarg0, d = c_rarg1, count = c_rarg2; - RegSet saved_reg = RegSet::of(s, d, count); int size; bool aligned; bool is_oop; @@ -1607,17 +1859,45 @@ class StubGenerator: public StubCodeGenerator { ShouldNotReachHere(); break; } + // all stubs provide a 2nd entry which omits the frame push for + // use when bailing out from a conjoint copy. 
However we may also + // need some extra addressses for memory access protection. + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 2, "sanity check"); + assert(nopush_entry != nullptr, "all disjoint copy stubs export a nopush entry"); + + bool add_extras = !is_oop && (!aligned || sizeof(jlong) == size); + int extra_count = ((add_extras ? 1 : 0) * UnsafeMemoryAccess::COLUMN_COUNT); + GrowableArray
<address> entries; + GrowableArray<address>
 extras; + GrowableArray<address>
*extras_ptr = (extra_count > 0 ? &extras : nullptr); + address start = load_archive_data(stub_id, &entries, extras_ptr); + if (start != nullptr) { + assert(entries.length() == entry_count - 1, + "unexpected entries count %d", entries.length()); + *nopush_entry = entries.at(0); + assert(extras.length() == extra_count, + "unexpected extra count %d", extras.length()); + if (add_extras) { + // register one handler at offset 0 + register_unsafe_access_handlers(extras, 0, 1); + } + return start; + } + + Register s = c_rarg0, d = c_rarg1, count = c_rarg2; + RegSet saved_reg = RegSet::of(s, d, count); __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); - if (nopush_entry != nullptr) { - *nopush_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - BLOCK_COMMENT("Entry:"); - } + *nopush_entry = __ pc(); + entries.append(*nopush_entry); + + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Post-Push Entry:"); DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; if (dest_uninitialized) { @@ -1636,8 +1916,7 @@ class StubGenerator: public StubCodeGenerator { } { // UnsafeMemoryAccess page error: continue after unsafe access - bool add_entry = !is_oop && (!aligned || sizeof(jlong) == size); - UnsafeMemoryAccessMark umam(this, add_entry, true); + UnsafeMemoryAccessMark umam(this, add_extras, true); copy_memory(decorators, is_oop ? 
T_OBJECT : T_BYTE, aligned, s, d, count, size); } @@ -1652,6 +1931,20 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ mov(r0, zr); // return 0 __ ret(lr); + + address end = __ pc(); + + if (add_extras) { + // retrieve the registered handler addresses + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == extra_count + , "incorrect handlers count %d", extras.length()); + } + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, &entries, extras_ptr); + return start; } @@ -1663,8 +1956,8 @@ class StubGenerator: public StubCodeGenerator { // corresponding disjoint copy routine which can be // jumped to if the ranges do not actually overlap // - // entry - is assigned to the stub's post push entry point unless - // it is null + // nopush_entry - is assigned to the stub's post push entry point + // unless it is null // // // Inputs: @@ -1681,8 +1974,6 @@ class StubGenerator: public StubCodeGenerator { // used by some other conjoint copy method // address generate_conjoint_copy(StubId stub_id, address nooverlap_target, address *nopush_entry) { - Register s = c_rarg0, d = c_rarg1, count = c_rarg2; - RegSet saved_regs = RegSet::of(s, d, count); int size; bool aligned; bool is_oop; @@ -1762,15 +2053,47 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } + // only some conjoint stubs generate a 2nd entry + int entry_count = StubInfo::entry_count(stub_id); + int expected_entry_count = (nopush_entry == nullptr ? 1 : 2); + assert(entry_count == expected_entry_count, + "expected entry count %d does not match declared entry count %d for stub %s", + expected_entry_count, entry_count, StubInfo::name(stub_id)); + // We need to protect memory accesses in certain cases + bool add_extras = !is_oop && (!aligned || sizeof(jlong) == size); + int extra_count = ((add_extras ? 
1 : 0) * UnsafeMemoryAccess::COLUMN_COUNT); + GrowableArray
entries; + GrowableArray
extras; + GrowableArray
*entries_ptr = (nopush_entry != nullptr ? &entries : nullptr); + GrowableArray
*extras_ptr = (extra_count > 0 ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entries count %d", entries.length()); + assert(extras.length() == extra_count, + "unexpected extra count %d", extras.length()); + if (nopush_entry != nullptr) { + *nopush_entry = entries.at(0); + } + if (add_extras) { + // register one handler at offset 0 + register_unsafe_access_handlers(extras, 0, 1); + } + return start; + } + + Register s = c_rarg0, d = c_rarg1, count = c_rarg2; + RegSet saved_regs = RegSet::of(s, d, count); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); if (nopush_entry != nullptr) { *nopush_entry = __ pc(); + entries.append(*nopush_entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - BLOCK_COMMENT("Entry:"); + BLOCK_COMMENT("Post-Push Entry:"); } // use fwd copy when (d-s) above_equal (count*size) @@ -1798,8 +2121,7 @@ class StubGenerator: public StubCodeGenerator { } { // UnsafeMemoryAccess page error: continue after unsafe access - bool add_entry = !is_oop && (!aligned || sizeof(jlong) == size); - UnsafeMemoryAccessMark umam(this, add_entry, true); + UnsafeMemoryAccessMark umam(this, add_extras, true); copy_memory(decorators, is_oop ? 
T_OBJECT : T_BYTE, aligned, s, d, count, -size); } if (is_oop) { @@ -1811,6 +2133,23 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ mov(r0, zr); // return 0 __ ret(lr); + + assert(entries.length() == expected_entry_count - 1, + "unexpected entries count %d", entries.length()); + + address end = __ pc(); + + if (add_extras) { + // retrieve the registered handler addresses + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == extra_count, + "incorrect handlers count %d", extras.length()); + } + + // record the stub entry and end plus any no_push entry and/or + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -1864,6 +2203,27 @@ class StubGenerator: public StubCodeGenerator { ShouldNotReachHere(); } + // The normal stub provides a 2nd entry which omits the frame push + // for use when bailing out from a disjoint copy. + // Only some conjoint stubs generate a 2nd entry + int entry_count = StubInfo::entry_count(stub_id); + int expected_entry_count = (nopush_entry == nullptr ? 1 : 2); + GrowableArray
entries; + GrowableArray
*entries_ptr = (expected_entry_count == 1 ? nullptr : &entries); + assert(entry_count == expected_entry_count, + "expected entry count %d does not match declared entry count %d for stub %s", + expected_entry_count, entry_count, StubInfo::name(stub_id)); + address start = load_archive_data(stub_id, entries_ptr); + if (start != nullptr) { + assert(entries.length() + 1 == expected_entry_count, + "expected entry count %d does not match return entry count %d for stub %s", + expected_entry_count, entries.length() + 1, StubInfo::name(stub_id)); + if (nopush_entry != nullptr) { + *nopush_entry = entries.at(0); + } + return start; + } + Label L_load_element, L_store_element, L_do_card_marks, L_done, L_done_pop; // Input registers (after setup_arg_regs) @@ -1896,7 +2256,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -1913,6 +2273,7 @@ class StubGenerator: public StubCodeGenerator { // Caller of this entry point must set up the argument registers. 
if (nopush_entry != nullptr) { *nopush_entry = __ pc(); + entries.append(*nopush_entry); BLOCK_COMMENT("Entry:"); } @@ -2010,6 +2371,8 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end plus any no_push entry + store_archive_data(stub_id, start, __ pc() , entries_ptr); return start; } @@ -2072,13 +2435,18 @@ class StubGenerator: public StubCodeGenerator { address int_copy_entry, address long_copy_entry) { StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } Label L_long_aligned, L_int_aligned, L_short_aligned; Register s = c_rarg0, d = c_rarg1, count = c_rarg2; __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame // bump this on entry, not on exit: @@ -2104,6 +2472,9 @@ class StubGenerator: public StubCodeGenerator { __ lsr(count, count, LogBytesPerLong); // size => long_count __ b(RuntimeAddress(long_copy_entry)); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2125,7 +2496,12 @@ class StubGenerator: public StubCodeGenerator { address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry) { StubId stub_id = StubId::stubgen_generic_arraycopy_id; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } Label L_failed, L_objArray; Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; @@ -2144,7 +2520,7 @@ class StubGenerator: public StubCodeGenerator { StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper 
stackwalking of RuntimeStub frame @@ -2383,6 +2759,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2427,10 +2806,15 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); }; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -2563,15 +2947,32 @@ class StubGenerator: public StubCodeGenerator { __ bind(L_exit2); __ leave(); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address generate_unsafecopy_common_error_exit() { - address start_pc = __ pc(); + StubId stub_id = StubId::stubgen_unsafecopy_common_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); + StubCodeMark mark(this, stub_id); + start = __ pc(); __ leave(); __ mov(r0, 0); __ ret(lr); - return start_pc; + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + + return start; } // @@ -2589,13 +2990,28 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - byte value // address generate_unsafe_setmemory() { + StubId stub_id = StubId::stubgen_unsafe_setmemory_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + // we expect one set of extra unsafememory access handler entries + GrowableArray
extras; + int extra_count = 1 * UnsafeMemoryAccess::COLUMN_COUNT; + address start = load_archive_data(stub_id, nullptr, &extras); + if (start != nullptr) { + assert(extras.length() == extra_count, + "unexpected extra entry count %d", extras.length()); + register_unsafe_access_handlers(extras, 0, 1); + return start; + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, StubId::stubgen_unsafe_setmemory_id); - address start = __ pc(); + StubCodeMark mark(this, stub_id); + start = __ pc(); Register dest = c_rarg0, count = c_rarg1, value = c_rarg2; Label tail; + { UnsafeMemoryAccessMark umam(this, true, false); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2679,6 +3095,17 @@ class StubGenerator: public StubCodeGenerator { __ bind(finished); __ leave(); __ ret(lr); + // have to exit the block and destroy the UnsafeMemoryAccessMark + // in order to retrieve the handler end address + } + + // install saved handler addresses in extras + address end = __ pc(); + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == extra_count, + "incorrect handlers count %d", extras.length()); + // record the stub entry and end plus the extras + store_archive_data(stub_id, start, end, nullptr, &extras); return start; } @@ -2686,33 +3113,45 @@ class StubGenerator: public StubCodeGenerator { address generate_data_cache_writeback() { const Register line = c_rarg0; // address of line to write back - __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_data_cache_writeback_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); __ cache_wb(Address(line, 0)); __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + 
return start; } address generate_data_cache_writeback_sync() { - const Register is_pre = c_rarg0; // pre or post sync - - __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + const Register is_pre = c_rarg0; // pre or post sync + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); // pre wbsync is a no-op // post wbsync translates to an sfence Label skip; - address start = __ pc(); + start = __ pc(); __ enter(); __ cbnz(is_pre, skip); __ cache_wbsync(false); @@ -2720,6 +3159,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2882,8 +3324,15 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - sessionKe (key) in little endian int array // address generate_aescrypt_encryptBlock() { - __ align(CodeEntryAlignment); + assert(UseAES, "need AES cryptographic extension support"); StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); const Register from = c_rarg0; // source array address @@ -2891,7 +3340,7 @@ class StubGenerator: public StubCodeGenerator { const Register key = c_rarg2; // key array address const Register keylen = rscratch1; - address start = __ pc(); + start = __ pc(); __ enter(); __ ldrw(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); @@ -2904,6 +3353,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + 
store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2916,8 +3368,14 @@ class StubGenerator: public StubCodeGenerator { // address generate_aescrypt_decryptBlock() { assert(UseAES, "need AES cryptographic extension support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label L_doLast; @@ -2926,7 +3384,7 @@ class StubGenerator: public StubCodeGenerator { const Register key = c_rarg2; // key array address const Register keylen = rscratch1; - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame __ ldrw(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); @@ -2938,6 +3396,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2955,8 +3416,14 @@ class StubGenerator: public StubCodeGenerator { // address generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES cryptographic extension support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52; @@ -2969,7 +3436,7 @@ class StubGenerator: public StubCodeGenerator { const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) const Register keylen = rscratch1; - address 
start = __ pc(); + start = __ pc(); __ enter(); @@ -3043,6 +3510,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3060,8 +3530,14 @@ class StubGenerator: public StubCodeGenerator { // address generate_cipherBlockChaining_decryptAESCrypt() { assert(UseAES, "need AES cryptographic extension support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52; @@ -3074,7 +3550,7 @@ class StubGenerator: public StubCodeGenerator { const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) const Register keylen = rscratch1; - address start = __ pc(); + start = __ pc(); __ enter(); @@ -3152,6 +3628,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3188,6 +3667,13 @@ class StubGenerator: public StubCodeGenerator { // r0 - input length // address generate_counterMode_AESCrypt() { + StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } const Register in = c_rarg0; const Register out = c_rarg1; const Register key = c_rarg2; @@ -3248,9 +3734,8 @@ class StubGenerator: public StubCodeGenerator { // Wide bulk encryption of whole blocks. 
__ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id; StubCodeMark mark(this, stub_id); - const address start = __ pc(); + start = __ pc(); __ enter(); Label DONE, CTR_large_block, large_block_return; @@ -3435,6 +3920,9 @@ class StubGenerator: public StubCodeGenerator { __ strw(used, Address(used_ptr)); __ b(large_block_return); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3451,11 +3939,16 @@ class StubGenerator: public StubCodeGenerator { // return - number of processed bytes address generate_galoisCounterMode_AESCrypt() { Label ghash_polynomial; // local data generated after code - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_galoisCounterMode_AESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register in = c_rarg0; @@ -3567,6 +4060,9 @@ class StubGenerator: public StubCodeGenerator { // 128-bit vector __ emit_int64(0x87); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3685,10 +4181,16 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -3815,6 +4317,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3838,11 +4343,16 @@ class 
StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -3919,6 +4429,9 @@ class StubGenerator: public StubCodeGenerator { __ emit_int32(0x8f1bbcdc); __ emit_int32(0xca62c1d6); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3943,30 +4456,15 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } - - static const uint32_t round_consts[64] = { - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, - 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, - }; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -3987,7 +4485,7 @@ class 
StubGenerator: public StubCodeGenerator { // t1 == v7 // load 16 keys to v16..v31 - __ lea(rscratch1, ExternalAddress((address)round_consts)); + __ lea(rscratch1, ExternalAddress((address)_sha256_round_consts)); __ ld1(v16, v17, v18, v19, __ T4S, __ post(rscratch1, 64)); __ ld1(v20, v21, v22, v23, __ T4S, __ post(rscratch1, 64)); __ ld1(v24, v25, v26, v27, __ T4S, __ post(rscratch1, 64)); @@ -4048,6 +4546,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -4099,41 +4600,15 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } - - static const uint64_t round_consts[80] = { - 0x428A2F98D728AE22L, 0x7137449123EF65CDL, 0xB5C0FBCFEC4D3B2FL, - 0xE9B5DBA58189DBBCL, 0x3956C25BF348B538L, 0x59F111F1B605D019L, - 0x923F82A4AF194F9BL, 0xAB1C5ED5DA6D8118L, 0xD807AA98A3030242L, - 0x12835B0145706FBEL, 0x243185BE4EE4B28CL, 0x550C7DC3D5FFB4E2L, - 0x72BE5D74F27B896FL, 0x80DEB1FE3B1696B1L, 0x9BDC06A725C71235L, - 0xC19BF174CF692694L, 0xE49B69C19EF14AD2L, 0xEFBE4786384F25E3L, - 0x0FC19DC68B8CD5B5L, 0x240CA1CC77AC9C65L, 0x2DE92C6F592B0275L, - 0x4A7484AA6EA6E483L, 0x5CB0A9DCBD41FBD4L, 0x76F988DA831153B5L, - 0x983E5152EE66DFABL, 0xA831C66D2DB43210L, 0xB00327C898FB213FL, - 0xBF597FC7BEEF0EE4L, 0xC6E00BF33DA88FC2L, 0xD5A79147930AA725L, - 0x06CA6351E003826FL, 0x142929670A0E6E70L, 0x27B70A8546D22FFCL, - 0x2E1B21385C26C926L, 0x4D2C6DFC5AC42AEDL, 0x53380D139D95B3DFL, - 0x650A73548BAF63DEL, 0x766A0ABB3C77B2A8L, 0x81C2C92E47EDAEE6L, - 0x92722C851482353BL, 0xA2BFE8A14CF10364L, 0xA81A664BBC423001L, - 0xC24B8B70D0F89791L, 0xC76C51A30654BE30L, 0xD192E819D6EF5218L, - 0xD69906245565A910L, 0xF40E35855771202AL, 0x106AA07032BBD1B8L, - 0x19A4C116B8D2D0C8L, 0x1E376C085141AB53L, 0x2748774CDF8EEB99L, - 0x34B0BCB5E19B48A8L, 0x391C0CB3C5C95A63L, 0x4ED8AA4AE3418ACBL, - 0x5B9CCA4F7763E373L, 0x682E6FF3D6B2B8A3L, 0x748F82EE5DEFB2FCL, - 0x78A5636F43172F60L, 
0x84C87814A1F0AB72L, 0x8CC702081A6439ECL, - 0x90BEFFFA23631E28L, 0xA4506CEBDE82BDE9L, 0xBEF9A3F7B2C67915L, - 0xC67178F2E372532BL, 0xCA273ECEEA26619CL, 0xD186B8C721C0C207L, - 0xEADA7DD6CDE0EB1EL, 0xF57D4F7FEE6ED178L, 0x06F067AA72176FBAL, - 0x0A637DC5A2C898A6L, 0x113F9804BEF90DAEL, 0x1B710B35131C471BL, - 0x28DB77F523047D84L, 0x32CAAB7B40C72493L, 0x3C9EBE0A15C9BEBCL, - 0x431D67C49C100D4CL, 0x4CC5D4BECB3E42B6L, 0x597F299CFC657E2AL, - 0x5FCB6FAB3AD6FAECL, 0x6C44198C4A475817L - }; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -4151,7 +4626,7 @@ class StubGenerator: public StubCodeGenerator { __ ld1(v8, v9, v10, v11, __ T2D, state); // load first 4 round constants - __ lea(rscratch1, ExternalAddress((address)round_consts)); + __ lea(rscratch1, ExternalAddress((address)_sha512_round_consts)); __ ld1(v20, v21, v22, v23, __ T2D, __ post(rscratch1, 64)); __ BIND(sha512_loop); @@ -4236,6 +4711,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -4349,22 +4827,15 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } - - static const uint64_t round_consts[24] = { - 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL, - 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L, - 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL, - 0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL, - 0x000000008000808BL, 0x800000000000008BL, 0x8000000000008089L, - 0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, - 0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L, - 0x8000000000008080L, 
0x0000000080000001L, 0x8000000080008008L - }; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -4396,7 +4867,7 @@ class StubGenerator: public StubCodeGenerator { __ movw(rscratch2, 24); // load round_constants base - __ lea(rscratch1, ExternalAddress((address) round_consts)); + __ lea(rscratch1, ExternalAddress((address) _sha3_round_consts)); // load input __ ld1(v25, v26, v27, v28, __ T8B, __ post(buf, 32)); @@ -4488,6 +4959,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -4495,22 +4969,18 @@ class StubGenerator: public StubCodeGenerator { // c_rarg0 - long[] state0 // c_rarg1 - long[] state1 address generate_double_keccak() { - static const uint64_t round_consts[24] = { - 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL, - 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L, - 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL, - 0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL, - 0x000000008000808BL, 0x800000000000008BL, 0x8000000000008089L, - 0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, - 0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L, - 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L - }; - + StubId stub_id = StubId::stubgen_double_keccak_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } // Implements the double_keccak() method of the // sun.secyrity.provider.SHA3Parallel class __ align(CodeEntryAlignment); - 
StubCodeMark mark(this, "StubRoutines", "double_keccak"); - address start = __ pc(); + StubCodeMark mark(this, stub_id); + start = __ pc(); __ enter(); Register state0 = c_rarg0; @@ -4546,7 +5016,7 @@ class StubGenerator: public StubCodeGenerator { __ movw(rscratch2, 24); // load round_constants base - __ lea(rscratch1, ExternalAddress((address) round_consts)); + __ lea(rscratch1, ExternalAddress((address) _double_keccak_round_consts)); __ BIND(rounds24_loop); __ subw(rscratch2, rscratch2, 1); @@ -4578,6 +5048,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -4611,11 +5084,17 @@ class StubGenerator: public StubCodeGenerator { // vectors write their first lane back to the keystream buffer, followed // by the second lane from all vectors and so on. address generate_chacha20Block_blockpar() { + StubId stub_id = StubId::stubgen_chacha20Block_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } Label L_twoRounds, L_cc20_const; __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_chacha20Block_id; StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); int i, j; @@ -4770,6 +5249,9 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x0605040702010003UL); __ emit_int64(0x0E0D0C0F0A09080BUL); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -5258,11 +5740,16 @@ class StubGenerator: public StubCodeGenerator { // coeffs (short[256]) = c_rarg0 // ntt_zetas (short[256]) = c_rarg1 address generate_kyberNtt() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + 
address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -5486,6 +5973,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -5496,11 +5986,16 @@ class StubGenerator: public StubCodeGenerator { // coeffs (short[256]) = c_rarg0 // ntt_zetas (short[256]) = c_rarg1 address generate_kyberInverseNtt() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberInverseNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -5770,6 +6265,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -5783,11 +6281,16 @@ class StubGenerator: public StubCodeGenerator { // nttb (short[256]) = c_rarg2 // zetas (short[128]) = c_rarg3 address generate_kyberNttMult() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberNttMult_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register result = c_rarg0; @@ -5889,6 +6392,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record 
the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -5900,11 +6406,16 @@ class StubGenerator: public StubCodeGenerator { // a (short[256]) = c_rarg1 // b (short[256]) = c_rarg2 address generate_kyberAddPoly_2() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberAddPoly_2_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register result = c_rarg0; @@ -5973,6 +6484,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -5985,11 +6499,16 @@ class StubGenerator: public StubCodeGenerator { // b (short[256]) = c_rarg2 // c (short[256]) = c_rarg3 address generate_kyberAddPoly_3() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberAddPoly_3_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register result = c_rarg0; @@ -6072,6 +6591,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6092,12 +6614,18 @@ class StubGenerator: public StubCodeGenerator { // parsed (short[]) = c_rarg2 // parsedLength = c_rarg3 address generate_kyber12To16() { + StubId stub_id = StubId::stubgen_kyber12To16_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity 
check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } Label L_F00, L_loop; __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_kyber12To16_id; StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register condensed = c_rarg0; @@ -6225,6 +6753,9 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x0f000f000f000f00); __ emit_int64(0x0f000f000f000f00); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6234,11 +6765,16 @@ class StubGenerator: public StubCodeGenerator { // // coeffs (short[256]) = c_rarg0 address generate_kyberBarrettReduce() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberBarrettReduce_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -6318,6 +6854,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6481,11 +7020,16 @@ class StubGenerator: public StubCodeGenerator { // coeffs (int[256]) = c_rarg0 // zetas (int[256]) = c_rarg1 address generate_dilithiumAlmostNtt() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumAlmostNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -6596,6 +7140,9 @@ class 
StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6688,11 +7235,16 @@ class StubGenerator: public StubCodeGenerator { // coeffs (int[256]) = c_rarg0 // zetas (int[256]) = c_rarg1 address generate_dilithiumAlmostInverseNtt() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumAlmostInverseNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -6788,6 +7340,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6801,11 +7356,16 @@ class StubGenerator: public StubCodeGenerator { // poly1 (int[256]) = c_rarg1 // poly2 (int[256]) = c_rarg2 address generate_dilithiumNttMult() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumNttMult_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); Label L_loop; @@ -6854,6 +7414,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6865,11 +7428,16 @@ class StubGenerator: public StubCodeGenerator { // coeffs (int[256]) = c_rarg0 // constant (int) = c_rarg1 address generate_dilithiumMontMulByConstant() { - - __ 
align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumMontMulByConstant_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); Label L_loop; @@ -6915,6 +7483,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -6929,11 +7500,16 @@ class StubGenerator: public StubCodeGenerator { // twoGamma2 (int) = c_rarg3 // multiplier (int) = c_rarg4 address generate_dilithiumDecomposePoly() { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumDecomposePoly_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_loop; const Register input = c_rarg0; @@ -7073,6 +7649,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7212,21 +7791,15 @@ class StubGenerator: public StubCodeGenerator { default: ShouldNotReachHere(); } - - static const uint64_t round_consts[24] = { - 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL, - 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L, - 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL, - 0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL, - 0x000000008000808BL, 0x800000000000008BL, 0x8000000000008089L, - 0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, - 
0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L, - 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L - }; - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -7378,7 +7951,7 @@ class StubGenerator: public StubCodeGenerator { __ fmovs(v1, 1.0); // exact representation __ str(buf, Address(sp, 16)); - __ lea(tmp3, ExternalAddress((address) round_consts)); + __ lea(tmp3, ExternalAddress((address) _sha3_round_consts)); __ BIND(loop_body); keccak_round_gpr(can_use_fp, can_use_r18, tmp3, @@ -7433,6 +8006,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7449,12 +8025,17 @@ class StubGenerator: public StubCodeGenerator { */ address generate_updateBytesCRC32() { assert(UseCRC32Intrinsics, "what are we doing here?"); - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_updateBytesCRC32_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register crc = c_rarg0; // crc const Register buf = c_rarg1; // source java byte array address @@ -7474,6 +8055,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7491,12 +8075,17 @@ class StubGenerator: public StubCodeGenerator { */ address 
generate_updateBytesCRC32C() { assert(UseCRC32CIntrinsics, "what are we doing here?"); - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_updateBytesCRC32C_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register crc = c_rarg0; // crc const Register buf = c_rarg1; // source java byte array address @@ -7516,6 +8105,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7531,10 +8123,16 @@ class StubGenerator: public StubCodeGenerator { * c_rarg0 - int adler result */ address generate_updateBytesAdler32() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_updateBytesAdler32_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_simple_by1_loop, L_nmax, L_nmax_loop, L_by16, L_by16_loop, L_by1_loop, L_do_mod, L_combine, L_by1; @@ -7702,6 +8300,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7753,11 +8354,17 @@ class StubGenerator: public StubCodeGenerator { * c_rarg4 - z address */ address generate_multiplyToLen() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_multiplyToLen_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = 
load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register x = r0; const Register xlen = r1; const Register y = r2; @@ -7779,6 +8386,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7786,10 +8396,16 @@ class StubGenerator: public StubCodeGenerator { // squareToLen algorithm for sizes 1..127 described in java code works // faster than multiply_to_len on some CPUs and slower on others, but // multiply_to_len shows a bit better overall results - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_squareToLen_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register x = r0; const Register xlen = r1; @@ -7816,15 +8432,25 @@ class StubGenerator: public StubCodeGenerator { __ pop(spilled_regs, sp); __ leave(); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address generate_mulAdd() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_mulAdd_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register out = r0; const Register in = r1; @@ -7838,6 +8464,9 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub 
entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7851,10 +8480,16 @@ class StubGenerator: public StubCodeGenerator { // c_rarg4 - numIter // address generate_bigIntegerRightShift() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label ShiftSIMDLoop, ShiftTwoLoop, ShiftThree, ShiftTwo, ShiftOne, Exit; @@ -7961,6 +8596,9 @@ class StubGenerator: public StubCodeGenerator { __ BIND(Exit); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -7974,10 +8612,16 @@ class StubGenerator: public StubCodeGenerator { // c_rarg4 - numIter // address generate_bigIntegerLeftShift() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label ShiftSIMDLoop, ShiftTwoLoop, ShiftThree, ShiftTwo, ShiftOne, Exit; @@ -8072,10 +8716,25 @@ class StubGenerator: public StubCodeGenerator { __ BIND(Exit); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address generate_count_positives(address &count_positives_long) { + StubId stub_id = StubId::stubgen_count_positives_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + // We have an extra entry for count_positives_long. + assert(entry_count == 2, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == 1, + "unexpected extra entry count %d", entries.length()); + count_positives_long = entries.at(0); + return start; + } const u1 large_loop_size = 64; const uint64_t UPPER_BIT_MASK=0x8080808080808080; int dcache_line = VM_Version::dcache_line_size(); @@ -8083,8 +8742,6 @@ class StubGenerator: public StubCodeGenerator { Register ary1 = r1, len = r2, result = r0; __ align(CodeEntryAlignment); - - StubId stub_id = StubId::stubgen_count_positives_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -8127,6 +8784,7 @@ class StubGenerator: public StubCodeGenerator { const RegSet spilled_regs = RegSet::range(tmp1, tmp5) + tmp6; count_positives_long = __ pc(); // 2nd entry point + entries.append(count_positives_long); __ enter(); @@ -8241,6 +8899,9 @@ class StubGenerator: public StubCodeGenerator { __ sub(result, result, len); __ ret(lr); + // record the stub entry and end plus the extra entry + store_archive_data(stub_id, entry, __ pc(), &entries); + return entry; } @@ -8331,6 +8992,13 @@ class StubGenerator: public StubCodeGenerator { // r3-r5 are reserved temporary registers // Clobbers: v0-v7 when UseSIMDForArrayEquals, rscratch1, rscratch2 address generate_large_array_equals() { + StubId stub_id = StubId::stubgen_large_array_equals_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } Register a1 = r1, a2 = r2, result = r0, cnt1 = r10, tmp1 = rscratch1, tmp2 = rscratch2, tmp3 = r3, tmp4 = r4, tmp5 = r5, tmp6 = r11, tmp7 = r12, tmp8 = r13; @@ -8346,7 +9014,6 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubId stub_id = 
StubId::stubgen_large_array_equals_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -8421,6 +9088,10 @@ class StubGenerator: public StubCodeGenerator { __ bind(NOT_EQUAL_NO_POP); __ leave(); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } @@ -8429,6 +9100,33 @@ class StubGenerator: public StubCodeGenerator { // cnt = r2 - elements count // Clobbers: v0-v13, rscratch1, rscratch2 address generate_large_arrays_hashcode(BasicType eltype) { + StubId stub_id; + switch (eltype) { + case T_BOOLEAN: + stub_id = StubId::stubgen_large_arrays_hashcode_boolean_id; + break; + case T_BYTE: + stub_id = StubId::stubgen_large_arrays_hashcode_byte_id; + break; + case T_CHAR: + stub_id = StubId::stubgen_large_arrays_hashcode_char_id; + break; + case T_SHORT: + stub_id = StubId::stubgen_large_arrays_hashcode_short_id; + break; + case T_INT: + stub_id = StubId::stubgen_large_arrays_hashcode_int_id; + break; + default: + stub_id = StubId::NO_STUBID; + ShouldNotReachHere(); + }; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } const Register result = r0, ary = r1, cnt = r2; const FloatRegister vdata0 = v3, vdata1 = v2, vdata2 = v1, vdata3 = v0; const FloatRegister vmul0 = v4, vmul1 = v5, vmul2 = v6, vmul3 = v7; @@ -8472,28 +9170,6 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubId stub_id; - switch (eltype) { - case T_BOOLEAN: - stub_id = StubId::stubgen_large_arrays_hashcode_boolean_id; - break; - case T_BYTE: - stub_id = StubId::stubgen_large_arrays_hashcode_byte_id; - break; - case T_CHAR: - stub_id = StubId::stubgen_large_arrays_hashcode_char_id; - break; - case T_SHORT: - stub_id = StubId::stubgen_large_arrays_hashcode_short_id; - break; - case T_INT: - stub_id = StubId::stubgen_large_arrays_hashcode_int_id; - break; - default: 
- stub_id = StubId::NO_STUBID; - ShouldNotReachHere(); - }; - StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -8728,19 +9404,32 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } address generate_dsin_dcos(bool isCos) { - __ align(CodeEntryAlignment); StubId stub_id = (isCos ? StubId::stubgen_dcos_id : StubId::stubgen_dsin_id); + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ generate_dsin_dcos(isCos, (address)StubRoutines::aarch64::_npio2_hw, (address)StubRoutines::aarch64::_two_over_pi, (address)StubRoutines::aarch64::_pio2, (address)StubRoutines::aarch64::_dsin_coef, (address)StubRoutines::aarch64::_dcos_coef); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -8784,8 +9473,14 @@ class StubGenerator: public StubCodeGenerator { // r10 = tmp1 // r11 = tmp2 address generate_compare_long_string_different_encoding(bool isLU) { - __ align(CodeEntryAlignment); StubId stub_id = (isLU ? 
StubId::stubgen_compare_long_string_LU_id : StubId::stubgen_compare_long_string_UL_id); + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address entry = __ pc(); Label SMALL_LOOP, TAIL, TAIL_LOAD_16, LOAD_LAST, DIFF1, DIFF2, @@ -8887,20 +9582,34 @@ class StubGenerator: public StubCodeGenerator { __ subw(result, tmp1, rscratch1); __ bind(DONE); __ ret(lr); - return entry; + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + + return entry; } // r0 = input (float16) // v0 = result (float) // v1 = temporary float register address generate_float16ToFloat() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_hf2f_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("Entry:"); __ flt16_to_flt(v0, r0, v1); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } @@ -8908,24 +9617,40 @@ class StubGenerator: public StubCodeGenerator { // r0 = result (float16) // v1 = temporary float register address generate_floatToFloat16() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_f2hf_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("Entry:"); __ flt_to_flt16(r0, v0, v1); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return 
entry; } address generate_method_entry_barrier() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_method_entry_barrier_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label deoptimize_label; - address start = __ pc(); + start = __ pc(); BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); @@ -8974,6 +9699,9 @@ class StubGenerator: public StubCodeGenerator { __ mov(sp, rscratch1); __ br(rscratch2); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -8985,8 +9713,14 @@ class StubGenerator: public StubCodeGenerator { // r10 = tmp1 // r11 = tmp2 address generate_compare_long_string_same_encoding(bool isLL) { - __ align(CodeEntryAlignment); StubId stub_id = (isLL ? StubId::stubgen_compare_long_string_LL_id : StubId::stubgen_compare_long_string_UU_id); + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address entry = __ pc(); Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4, @@ -9094,6 +9828,10 @@ class StubGenerator: public StubCodeGenerator { __ bind(LENGTH_DIFF); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } @@ -9125,8 +9863,14 @@ class StubGenerator: public StubCodeGenerator { case UU: stub_id = StubId::stubgen_compare_long_string_UU_id; break; default: ShouldNotReachHere(); } - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ 
align(CodeEntryAlignment); + StubCodeMark mark(this, stub_id); address entry = __ pc(); Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4, tmp1 = r10, tmp2 = r11; @@ -9161,8 +9905,6 @@ class StubGenerator: public StubCodeGenerator { ShouldNotReachHere(); \ } - StubCodeMark mark(this, stub_id); - __ mov(idx, 0); __ sve_whilelt(pgtmp1, mode == LL ? __ B : __ H, idx, cnt); @@ -9206,6 +9948,10 @@ class StubGenerator: public StubCodeGenerator { __ bind(DONE); __ ret(lr); #undef LOAD_PAIR + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } @@ -9267,6 +10013,12 @@ class StubGenerator: public StubCodeGenerator { stub_id = StubId::stubgen_string_indexof_linear_uu_id; } } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -9535,6 +10287,10 @@ class StubGenerator: public StubCodeGenerator { __ BIND(DONE); __ pop(spilled_regs, sp); __ ret(lr); + + // record the stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } @@ -9565,8 +10321,14 @@ class StubGenerator: public StubCodeGenerator { // v1 = loaded 8 bytes // Clobbers: r0, r1, r3, rscratch1, rflags, v0-v6 address generate_large_byte_array_inflate() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_large_byte_array_inflate_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address entry = __ pc(); Label LOOP, LOOP_START, LOOP_PRFM, LOOP_PRFM_START, DONE; @@ -9605,6 +10367,10 @@ class StubGenerator: public StubCodeGenerator { __ br(__ GE, LOOP); __ bind(DONE); __ ret(lr); + + // record the 
stub entry and end + store_archive_data(stub_id, entry, __ pc()); + return entry; } @@ -9620,7 +10386,7 @@ class StubGenerator: public StubCodeGenerator { * Output: * Updated state at c_rarg0 */ - address generate_ghash_processBlocks() { + address generate_ghash_processBlocks_small() { // Bafflingly, GCM uses little-endian for the byte order, but // big-endian for the bit order. For example, the polynomial 1 is // represented as the 16-byte string 80 00 00 00 | 12 bytes of 00. @@ -9632,11 +10398,17 @@ class StubGenerator: public StubCodeGenerator { // that) and keep the data in little-endian bit order through the // calculation, bit-reversing the inputs and outputs. - StubId stub_id = StubId::stubgen_ghash_processBlocks_id; + StubId stub_id = StubId::stubgen_ghash_processBlocks_small_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label polynomial; // local data generated at end of stub - __ align(CodeEntryAlignment); - address start = __ pc(); + start = __ pc(); Register state = c_rarg0; Register subkeyH = c_rarg1; @@ -9696,17 +10468,24 @@ class StubGenerator: public StubCodeGenerator { // 128-bit vector __ emit_int64(0x87); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } - address generate_ghash_processBlocks_wide() { - address small = generate_ghash_processBlocks(); - - StubId stub_id = StubId::stubgen_ghash_processBlocks_wide_id; - StubCodeMark mark(this, stub_id); + address generate_ghash_processBlocks(address small) { + StubId stub_id = StubId::stubgen_ghash_processBlocks_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } Label polynomial; // local data generated 
after stub __ align(CodeEntryAlignment); - address start = __ pc(); + StubCodeMark mark(this, stub_id); + start = __ pc(); Register state = c_rarg0; Register subkeyH = c_rarg1; @@ -9748,8 +10527,10 @@ class StubGenerator: public StubCodeGenerator { // 128-bit vector __ emit_int64(0x87); - return start; + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } void generate_base64_encode_simdround(Register src, Register dst, @@ -9800,26 +10581,16 @@ class StubGenerator: public StubCodeGenerator { */ address generate_base64_encodeBlock() { - static const char toBase64[64] = { - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', - 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' - }; - - static const char toBase64URL[64] = { - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', - 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_' - }; - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_base64_encodeBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register src = c_rarg0; // source array Register soff = c_rarg1; // source start offset @@ -9839,9 +10610,9 @@ class StubGenerator: public StubCodeGenerator { __ sub(length, send, soff); // load the codec base address - __ lea(codec, ExternalAddress((address) toBase64)); + __ 
lea(codec, ExternalAddress((address) _encodeBlock_toBase64)); __ cbz(isURL, ProcessData); - __ lea(codec, ExternalAddress((address) toBase64URL)); + __ lea(codec, ExternalAddress((address) _encodeBlock_toBase64URL)); __ BIND(ProcessData); @@ -9894,6 +10665,9 @@ class StubGenerator: public StubCodeGenerator { __ BIND(Exit); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -10015,80 +10789,16 @@ class StubGenerator: public StubCodeGenerator { // on http://0x80.pl/articles/base64-simd-neon.html#encoding-quadwords, in section // titled "Base64 decoding". - // Non-SIMD lookup tables are mostly dumped from fromBase64 array used in java.util.Base64, - // except the trailing character '=' is also treated illegal value in this intrinsic. That - // is java.util.Base64.fromBase64['='] = -2, while fromBase(URL)64ForNoSIMD['='] = 255 here. - static const uint8_t fromBase64ForNoSIMD[256] = { - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, 255u, 63u, - 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, 14u, - 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, 255u, - 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, 40u, - 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 
255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - }; - - static const uint8_t fromBase64URLForNoSIMD[256] = { - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, - 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, 14u, - 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, 63u, - 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, 40u, - 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 
255u, 255u, - }; - - // A legal value of base64 code is in range [0, 127]. We need two lookups - // with tbl/tbx and combine them to get the decode data. The 1st table vector - // lookup use tbl, out of range indices are set to 0 in destination. The 2nd - // table vector lookup use tbx, out of range indices are unchanged in - // destination. Input [64..126] is mapped to index [65, 127] in second lookup. - // The value of index 64 is set to 0, so that we know that we already get the - // decoded data with the 1st lookup. - static const uint8_t fromBase64ForSIMD[128] = { - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, 255u, 63u, - 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, - 0u, 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, - 14u, 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, - 255u, 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, - 40u, 41u, 42u, 43u, 44u, 45u, 46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, - }; - - static const uint8_t fromBase64URLForSIMD[128] = { - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, - 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 255u, 62u, 255u, 255u, - 52u, 53u, 54u, 55u, 56u, 57u, 58u, 59u, 60u, 61u, 255u, 255u, 255u, 255u, 255u, 255u, - 0u, 255u, 0u, 1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u, 10u, 11u, 12u, 13u, - 14u, 15u, 16u, 17u, 18u, 19u, 20u, 21u, 22u, 23u, 24u, 25u, 255u, 255u, 255u, 255u, - 63u, 255u, 26u, 27u, 28u, 29u, 30u, 31u, 32u, 33u, 34u, 35u, 36u, 37u, 38u, 39u, - 40u, 41u, 42u, 43u, 44u, 45u, 
46u, 47u, 48u, 49u, 50u, 51u, 255u, 255u, 255u, 255u, - }; - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_base64_decodeBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register src = c_rarg0; // source array Register soff = c_rarg1; // source start offset @@ -10115,9 +10825,9 @@ class StubGenerator: public StubCodeGenerator { __ sub(length, send, soff); __ bfm(length, zr, 0, 1); - __ lea(nosimd_codec, ExternalAddress((address) fromBase64ForNoSIMD)); + __ lea(nosimd_codec, ExternalAddress((address) _decodeBlock_fromBase64ForNoSIMD)); __ cbz(isURL, ProcessData); - __ lea(nosimd_codec, ExternalAddress((address) fromBase64URLForNoSIMD)); + __ lea(nosimd_codec, ExternalAddress((address) _decodeBlock_fromBase64URLForNoSIMD)); __ BIND(ProcessData); __ mov(rscratch1, length); @@ -10162,9 +10872,9 @@ class StubGenerator: public StubCodeGenerator { __ cbzw(rscratch1, Exit); __ sub(length, length, 80); - __ lea(simd_codec, ExternalAddress((address) fromBase64ForSIMD)); + __ lea(simd_codec, ExternalAddress((address) _decodeBlock_fromBase64ForSIMD)); __ cbz(isURL, SIMDEnter); - __ lea(simd_codec, ExternalAddress((address) fromBase64URLForSIMD)); + __ lea(simd_codec, ExternalAddress((address) _decodeBlock_fromBase64URLForSIMD)); __ BIND(SIMDEnter); __ ld1(v0, v1, v2, v3, __ T16B, __ post(simd_codec, 64)); @@ -10197,24 +10907,50 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } // Support for spin waits. 
address generate_spin_wait() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_spin_wait_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ spin_wait(); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } void generate_lookup_secondary_supers_table_stub() { StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == Klass::SECONDARY_SUPERS_TABLE_SIZE, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, + "unexpected extra entry count %d", entries.length()); + StubRoutines::_lookup_secondary_supers_table_stubs[0] = start; + for (int slot = 1; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = entries.at(slot - 1); + } + return; + } + StubCodeMark mark(this, stub_id); const Register @@ -10229,7 +10965,13 @@ class StubGenerator: public StubCodeGenerator { vtemp = v0; for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + address next_entry = __ pc(); + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = next_entry; + if (slot == 0) { + start = next_entry; + } else { + entries.append(next_entry); + } Label L_success; __ enter(); __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, @@ -10239,14 +10981,21 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); } + // record the stub entry and end plus all the auxiliary entries + store_archive_data(stub_id, start, __ pc(), &entries); } // Slow path implementation for UseSecondarySupersTable. 
address generate_lookup_secondary_supers_table_slow_path_stub() { StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - - address start = __ pc(); + start = __ pc(); const Register r_super_klass = r0, // argument r_array_base = r1, // argument @@ -10258,6 +11007,9 @@ class StubGenerator: public StubCodeGenerator { __ lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result); __ ret(lr); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -10397,14 +11149,43 @@ class StubGenerator: public StubCodeGenerator { if (! UseLSE) { return; } - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_atomic_entry_points_id; - StubCodeMark mark(this, stub_id); - address first_entry = __ pc(); + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == entry_count - 1, + "unexpected extra entry count %d", entries.length()); + aarch64_atomic_fetch_add_4_impl = (aarch64_atomic_stub_t)start; + int idx = 0; + aarch64_atomic_fetch_add_8_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_fetch_add_4_relaxed_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_fetch_add_8_relaxed_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_xchg_4_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_xchg_8_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_1_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_4_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_8_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_1_relaxed_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_4_relaxed_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_8_relaxed_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_4_release_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_8_release_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_4_seq_cst_impl = (aarch64_atomic_stub_t)entries.at(idx++); + aarch64_atomic_cmpxchg_8_seq_cst_impl = (aarch64_atomic_stub_t)entries.at(idx++); + assert(idx == entries.length(), "sanity!"); + return; + } + __ align(CodeEntryAlignment); + StubCodeMark mark(this, stub_id); + start = __ pc(); + address end; + { // ADD, memory_order_conservative AtomicStubMark mark_fetch_add_4(_masm, &aarch64_atomic_fetch_add_4_impl); gen_ldadd_entry(Assembler::word, memory_order_conservative); + AtomicStubMark mark_fetch_add_8(_masm, &aarch64_atomic_fetch_add_8_impl); gen_ldadd_entry(Assembler::xword, 
memory_order_conservative); @@ -10412,6 +11193,7 @@ class StubGenerator: public StubCodeGenerator { AtomicStubMark mark_fetch_add_4_relaxed (_masm, &aarch64_atomic_fetch_add_4_relaxed_impl); gen_ldadd_entry(MacroAssembler::word, memory_order_relaxed); + AtomicStubMark mark_fetch_add_8_relaxed (_masm, &aarch64_atomic_fetch_add_8_relaxed_impl); gen_ldadd_entry(MacroAssembler::xword, memory_order_relaxed); @@ -10419,14 +11201,17 @@ class StubGenerator: public StubCodeGenerator { // XCHG, memory_order_conservative AtomicStubMark mark_xchg_4(_masm, &aarch64_atomic_xchg_4_impl); gen_swpal_entry(Assembler::word); - AtomicStubMark mark_xchg_8_impl(_masm, &aarch64_atomic_xchg_8_impl); + + AtomicStubMark mark_xchg_8(_masm, &aarch64_atomic_xchg_8_impl); gen_swpal_entry(Assembler::xword); // CAS, memory_order_conservative AtomicStubMark mark_cmpxchg_1(_masm, &aarch64_atomic_cmpxchg_1_impl); gen_cas_entry(MacroAssembler::byte, memory_order_conservative); + AtomicStubMark mark_cmpxchg_4(_masm, &aarch64_atomic_cmpxchg_4_impl); gen_cas_entry(MacroAssembler::word, memory_order_conservative); + AtomicStubMark mark_cmpxchg_8(_masm, &aarch64_atomic_cmpxchg_8_impl); gen_cas_entry(MacroAssembler::xword, memory_order_conservative); @@ -10434,9 +11219,11 @@ class StubGenerator: public StubCodeGenerator { AtomicStubMark mark_cmpxchg_1_relaxed (_masm, &aarch64_atomic_cmpxchg_1_relaxed_impl); gen_cas_entry(MacroAssembler::byte, memory_order_relaxed); + AtomicStubMark mark_cmpxchg_4_relaxed (_masm, &aarch64_atomic_cmpxchg_4_relaxed_impl); gen_cas_entry(MacroAssembler::word, memory_order_relaxed); + AtomicStubMark mark_cmpxchg_8_relaxed (_masm, &aarch64_atomic_cmpxchg_8_relaxed_impl); gen_cas_entry(MacroAssembler::xword, memory_order_relaxed); @@ -10444,6 +11231,7 @@ class StubGenerator: public StubCodeGenerator { AtomicStubMark mark_cmpxchg_4_release (_masm, &aarch64_atomic_cmpxchg_4_release_impl); gen_cas_entry(MacroAssembler::word, memory_order_release); + AtomicStubMark 
mark_cmpxchg_8_release (_masm, &aarch64_atomic_cmpxchg_8_release_impl); gen_cas_entry(MacroAssembler::xword, memory_order_release); @@ -10451,11 +11239,41 @@ class StubGenerator: public StubCodeGenerator { AtomicStubMark mark_cmpxchg_4_seq_cst (_masm, &aarch64_atomic_cmpxchg_4_seq_cst_impl); gen_cas_entry(MacroAssembler::word, memory_order_seq_cst); + AtomicStubMark mark_cmpxchg_8_seq_cst (_masm, &aarch64_atomic_cmpxchg_8_seq_cst_impl); gen_cas_entry(MacroAssembler::xword, memory_order_seq_cst); - ICache::invalidate_range(first_entry, __ pc() - first_entry); + end = __ pc(); + + ICache::invalidate_range(start, end - start); + // exit block to force update of AtomicStubMark targets + } + + assert(start == (address)aarch64_atomic_fetch_add_4_impl, + "atomic stub should be at start of buffer"); + // record the stub start and end plus all the entries saved by the + // AtomicStubMark destructor + entries.append((address)aarch64_atomic_fetch_add_8_impl); + entries.append((address)aarch64_atomic_fetch_add_4_relaxed_impl); + entries.append((address)aarch64_atomic_fetch_add_8_relaxed_impl); + entries.append((address)aarch64_atomic_xchg_4_impl); + entries.append((address)aarch64_atomic_xchg_8_impl); + entries.append((address)aarch64_atomic_cmpxchg_1_impl); + entries.append((address)aarch64_atomic_cmpxchg_4_impl); + entries.append((address)aarch64_atomic_cmpxchg_8_impl); + entries.append((address)aarch64_atomic_cmpxchg_1_relaxed_impl); + entries.append((address)aarch64_atomic_cmpxchg_4_relaxed_impl); + entries.append((address)aarch64_atomic_cmpxchg_8_relaxed_impl); + entries.append((address)aarch64_atomic_cmpxchg_4_release_impl); + entries.append((address)aarch64_atomic_cmpxchg_8_release_impl); + entries.append((address)aarch64_atomic_cmpxchg_4_seq_cst_impl); + entries.append((address)aarch64_atomic_cmpxchg_8_seq_cst_impl); + + assert(entries.length() == entry_count - 1, + "unexpected extra entry count %d", entries.length()); + + store_archive_data(stub_id, start, end, 
&entries); } #endif // LINUX @@ -10559,9 +11377,19 @@ class StubGenerator: public StubCodeGenerator { if (!Continuations::enabled()) return nullptr; StubId stub_id = StubId::stubgen_cont_thaw_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); generate_cont_thaw(Continuation::thaw_top); + + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -10570,11 +11398,20 @@ class StubGenerator: public StubCodeGenerator { // TODO: will probably need multiple return barriers depending on return type StubId stub_id = StubId::stubgen_cont_returnBarrier_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); generate_cont_thaw(Continuation::thaw_return_barrier); + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -10582,19 +11419,34 @@ class StubGenerator: public StubCodeGenerator { if (!Continuations::enabled()) return nullptr; StubId stub_id = StubId::stubgen_cont_returnBarrierExc_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); generate_cont_thaw(Continuation::thaw_return_barrier_exception); + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; StubId stub_id = StubId::stubgen_cont_preempt_id; + int 
entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ reset_last_Java_frame(true); @@ -10619,6 +11471,9 @@ class StubGenerator: public StubCodeGenerator { __ ldr(rscratch1, Address(rscratch1)); __ br(rscratch1); + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -10674,10 +11529,16 @@ class StubGenerator: public StubCodeGenerator { // computation. address generate_poly1305_processBlocks() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_poly1305_processBlocks_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label here; __ enter(); RegSet callee_saved = RegSet::range(r19, r28); @@ -10785,14 +11646,23 @@ class StubGenerator: public StubCodeGenerator { __ leave(); __ ret(lr); + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Native caller has no idea how to handle exceptions, // so we just crash here. Up to callee to catch exceptions. 
@@ -10801,6 +11671,9 @@ class StubGenerator: public StubCodeGenerator { __ blr(rscratch1); __ should_not_reach_here(); + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -10809,8 +11682,14 @@ class StubGenerator: public StubCodeGenerator { // rmethod = result address generate_upcall_stub_load_target() { StubId stub_id = StubId::stubgen_upcall_stub_load_target_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ resolve_global_jobject(j_rarg0, rscratch1, rscratch2); // Load target method from receiver @@ -10824,6 +11703,9 @@ class StubGenerator: public StubCodeGenerator { __ ret(lr); + // record the stub start and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -11223,8 +12105,6 @@ class StubGenerator: public StubCodeGenerator { */ address generate_multiply() { Label argh, nothing; - bind(argh); - stop("MontgomeryMultiply total_allocation must be <= 8192"); align(CodeEntryAlignment); address entry = pc(); @@ -11331,6 +12211,10 @@ class StubGenerator: public StubCodeGenerator { bind(nothing); ret(lr); + // handler for error case + bind(argh); + stop("MontgomeryMultiply total_allocation must be <= 8192"); + return entry; } // In C, approximately: @@ -11434,8 +12318,6 @@ class StubGenerator: public StubCodeGenerator { */ address generate_square() { Label argh; - bind(argh); - stop("MontgomeryMultiply total_allocation must be <= 8192"); align(CodeEntryAlignment); address entry = pc(); @@ -11544,6 +12426,10 @@ class StubGenerator: public StubCodeGenerator { leave(); ret(lr); + // handler for error case + bind(argh); + stop("MontgomeryMultiply total_allocation must be <= 8192"); + return entry; } // In C, approximately: @@ -11753,7 +12639,7 @@ class StubGenerator: public 
StubCodeGenerator { #if COMPILER2_OR_JVMCI if (UseSVE == 0) { - StubRoutines::aarch64::_vector_iota_indices = generate_iota_indices(StubId::stubgen_vector_iota_indices_id); + generate_iota_indices(StubId::stubgen_vector_iota_indices_id); } // array equals stub for large arrays. @@ -11798,18 +12684,32 @@ class StubGenerator: public StubCodeGenerator { if (UseMontgomeryMultiplyIntrinsic) { StubId stub_id = StubId::stubgen_montgomeryMultiply_id; - StubCodeMark mark(this, stub_id); - MontgomeryMultiplyGenerator g(_masm, /*squaring*/false); - StubRoutines::_montgomeryMultiply = g.generate_multiply(); + address start = load_archive_data(stub_id); + if (start == nullptr) { + // we have to generate it + StubCodeMark mark(this, stub_id); + MontgomeryMultiplyGenerator g(_masm, /*squaring*/false); + start = g.generate_multiply(); + // record the stub start and end + store_archive_data(stub_id, start, _masm->pc()); + } + StubRoutines::_montgomeryMultiply = start; } if (UseMontgomerySquareIntrinsic) { StubId stub_id = StubId::stubgen_montgomerySquare_id; - StubCodeMark mark(this, stub_id); - MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); - // We use generate_multiply() rather than generate_square() - // because it's faster for the sizes of modulus we care about. - StubRoutines::_montgomerySquare = g.generate_multiply(); + address start = load_archive_data(stub_id); + if (start == nullptr) { + // we have to generate it + StubCodeMark mark(this, stub_id); + MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); + // We use generate_multiply() rather than generate_square() + // because it's faster for the sizes of modulus we care about. 
+ start = g.generate_multiply(); + // record the stub start and end + store_archive_data(stub_id, start, _masm->pc()); + } + StubRoutines::_montgomerySquare = start; } #endif // COMPILER2 @@ -11854,7 +12754,8 @@ class StubGenerator: public StubCodeGenerator { } if (UseGHASHIntrinsics) { // StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); - StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks_wide(); + StubRoutines::aarch64::_ghash_processBlocks_small = generate_ghash_processBlocks_small(); + StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(StubRoutines::aarch64::_ghash_processBlocks_small); } if (UseAESIntrinsics && UseGHASHIntrinsics) { StubRoutines::_galoisCounterMode_AESCrypt = generate_galoisCounterMode_AESCrypt(); @@ -11876,16 +12777,13 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id); StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id); } - if (UseSHA3Intrinsics) { - + if (UseSHA3Intrinsics && UseSIMDForSHA3Intrinsic) { StubRoutines::_double_keccak = generate_double_keccak(); - if (UseSIMDForSHA3Intrinsic) { - StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubId::stubgen_sha3_implCompress_id); - StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubId::stubgen_sha3_implCompressMB_id); - } else { - StubRoutines::_sha3_implCompress = generate_sha3_implCompress_gpr(StubId::stubgen_sha3_implCompress_id); - StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress_gpr(StubId::stubgen_sha3_implCompressMB_id); - } + StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubId::stubgen_sha3_implCompress_id); + StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubId::stubgen_sha3_implCompressMB_id); + } else if (UseSHA3Intrinsics) { + StubRoutines::_sha3_implCompress = 
generate_sha3_implCompress_gpr(StubId::stubgen_sha3_implCompress_id); + StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress_gpr(StubId::stubgen_sha3_implCompressMB_id); } if (UsePoly1305Intrinsics) { @@ -11901,7 +12799,7 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -11923,12 +12821,35 @@ class StubGenerator: public StubCodeGenerator { break; }; } + +#if INCLUDE_CDS + static void init_AOTAddressTable(GrowableArray
& external_addresses) { + // external data defined in this file +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(_sha256_round_consts); + ADD(_sha512_round_consts); + ADD(_sha3_round_consts); + ADD(_double_keccak_round_consts); + ADD(_encodeBlock_toBase64); + ADD(_encodeBlock_toBase64URL); + ADD(_decodeBlock_fromBase64ForNoSIMD); + ADD(_decodeBlock_fromBase64URLForNoSIMD); + ADD(_decodeBlock_fromBase64ForSIMD); + ADD(_decodeBlock_fromBase64URLForSIMD); +#undef ADD + } +#endif // INCLUDE_CDS }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) { + StubGenerator g(code, blob_id, stub_data); } +#if INCLUDE_CDS +void StubGenerator_init_AOTAddressTable(GrowableArray
& addresses) { + StubGenerator::init_AOTAddressTable(addresses); +} +#endif // INCLUDE_CDS #if defined (LINUX) diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp index 88993818b47..f02b681ca10 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp @@ -41,8 +41,12 @@ static void empty_spin_wait() { } #define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); -STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) +#define DEFINE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) [count]; +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY) + +#undef DEFINE_ARCH_ENTRY_ARARAY #undef DEFINE_ARCH_ENTRY_INIT #undef DEFINE_ARCH_ENTRY @@ -413,3 +417,36 @@ ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_pio2[] = { 2.73370053816464559624e-44, // 0x36E3822280000000 2.16741683877804819444e-51, // 0x3569F31D00000000 }; + +#if INCLUDE_CDS +extern void StubGenerator_init_AOTAddressTable(GrowableArray
& addresses); + +void StubRoutines::init_AOTAddressTable() { + ResourceMark rm; + GrowableArray
external_addresses; + // publish static addresses referred to by aarch64 generator + // n.b. we have to use use an extern call here because class + // StubGenerator, which provides the static method that knows how to + // add the relevant addresses, is declared in a source file rather + // than in a separately includeable header. + StubGenerator_init_AOTAddressTable(external_addresses); + // publish external data addresses defined in nested aarch64 class + StubRoutines::aarch64::init_AOTAddressTable(external_addresses); + AOTCodeCache::publish_external_addresses(external_addresses); +} + +void StubRoutines::aarch64::init_AOTAddressTable(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(_kyberConsts); + ADD(_dilithiumConsts); + // this is added in generic code + // ADD(_crc_table); + ADD(_adler_table); + ADD(_npio2_hw); + ADD(_dsin_coef); + ADD(_dcos_coef); + ADD(_two_over_pi); + ADD(_pio2); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp index c35371e1083..6067408ef13 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp @@ -60,9 +60,13 @@ class aarch64 { #define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) -private: - STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) +#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address STUB_FIELD_NAME(field_name) [count]; +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY) + +#undef DECLARE_ARCH_ENTRY_ARRAY #undef DECLARE_ARCH_ENTRY_INIT #undef DECLARE_ARCH_ENTRY @@ -78,8 +82,15 @@ private: #define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) +#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address getter_name(int idx) { \ + assert(0 <= idx && idx < count, "entry array index out of range"); \ + return STUB_FIELD_NAME(field_name) [idx]; \ + } + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY) + +#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY #undef DEFINE_ARCH_ENTRY_GETTER_INIT 
#undef DEFINE_ARCH_ENTRY_GETTER @@ -110,6 +121,11 @@ private: _completed = true; } +#if INCLUDE_CDS + static void init_AOTAddressTable(GrowableArray
& external_addresses); +#endif // INCLUDE_CDS + + private: static uint16_t _kyberConsts[]; static uint32_t _dilithiumConsts[]; diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp index 9b85733ed08..441bd4859fe 100644 --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved. - * Copyright 2025 Arm Limited and/or its affiliates. + * Copyright 2025, 2026 Arm Limited and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ * */ +#include "logging/log.hpp" #include "pauth_aarch64.hpp" #include "register_aarch64.hpp" #include "runtime/arguments.hpp" @@ -52,17 +53,56 @@ uintptr_t VM_Version::_pac_mask; SpinWait VM_Version::_spin_wait; +bool VM_Version::_cache_dic_enabled; +bool VM_Version::_cache_idc_enabled; +bool VM_Version::_ic_ivau_trapped; + const char* VM_Version::_features_names[MAX_CPU_FEATURES] = { nullptr }; static SpinWait get_spin_wait_desc() { - SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount); + SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount, OnSpinWaitDelay); if (spin_wait.inst() == SpinWait::SB && !VM_Version::supports_sb()) { vm_exit_during_initialization("OnSpinWaitInst is SB but current CPU does not support SB instruction"); } + if (spin_wait.inst() == SpinWait::WFET) { + if (!VM_Version::supports_wfxt()) { + vm_exit_during_initialization("OnSpinWaitInst is WFET but the CPU does not support the WFET instruction"); + } + + if (!VM_Version::supports_ecv()) { + vm_exit_during_initialization("The CPU does not support the FEAT_ECV required by the -XX:OnSpinWaitInst=wfet implementation"); + } + + if (!VM_Version::supports_sb()) { + vm_exit_during_initialization("The 
CPU does not support the SB instruction required by the -XX:OnSpinWaitInst=wfet implementation"); + } + + if (OnSpinWaitInstCount != 1) { + vm_exit_during_initialization("OnSpinWaitInstCount for OnSpinWaitInst 'wfet' must be 1"); + } + } else { + if (!FLAG_IS_DEFAULT(OnSpinWaitDelay)) { + vm_exit_during_initialization("OnSpinWaitDelay can only be used with -XX:OnSpinWaitInst=wfet"); + } + } + return spin_wait; } +static bool has_neoverse_n1_errata_1542419() { + const int major_rev_num = VM_Version::cpu_variant(); + const int minor_rev_num = VM_Version::cpu_revision(); + // Neoverse N1: 0xd0c + // Erratum 1542419 affects r3p0, r3p1 and r4p0. + // It is fixed in r4p1 and later revisions, which are not affected. + return (VM_Version::cpu_family() == VM_Version::CPU_ARM && + VM_Version::model_is(0xd0c) && + ((major_rev_num == 3 && minor_rev_num == 0) || + (major_rev_num == 3 && minor_rev_num == 1) || + (major_rev_num == 4 && minor_rev_num == 0))); +} + void VM_Version::initialize() { #define SET_CPU_FEATURE_NAME(id, name, bit) \ _features_names[bit] = XSTR(name); @@ -74,9 +114,14 @@ void VM_Version::initialize() { _supports_atomic_getset8 = true; _supports_atomic_getadd8 = true; - get_os_cpu_info(); + _cache_dic_enabled = false; + _cache_idc_enabled = false; + _ic_ivau_trapped = false; - int dcache_line = VM_Version::dcache_line_size(); + get_os_cpu_info(); + _cpu_features = _features; + + int dcache_line = dcache_line_size(); // Limit AllocatePrefetchDistance so that it does not exceed the // static constraint of 512 defined in runtime/globals.hpp. 
@@ -124,7 +169,7 @@ void VM_Version::initialize() { // if dcpop is available publish data cache line flush size via // generic field, otherwise let if default to zero thereby // disabling writeback - if (VM_Version::supports_dcpop()) { + if (supports_dcpop()) { _data_cache_line_flush_size = dcache_line; } } @@ -245,14 +290,24 @@ void VM_Version::initialize() { } } - if (FLAG_IS_DEFAULT(UseCRC32)) { - UseCRC32 = VM_Version::supports_crc32(); + if (supports_sha1() || supports_sha256() || + supports_sha3() || supports_sha512()) { + if (FLAG_IS_DEFAULT(UseSHA)) { + FLAG_SET_DEFAULT(UseSHA, true); + } else if (!UseSHA) { + clear_feature(CPU_SHA1); + clear_feature(CPU_SHA2); + clear_feature(CPU_SHA3); + clear_feature(CPU_SHA512); + } + } else if (UseSHA) { + warning("SHA instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA, false); } - if (UseCRC32 && !VM_Version::supports_crc32()) { - warning("UseCRC32 specified, but not supported on this CPU"); - FLAG_SET_DEFAULT(UseCRC32, false); - } + CHECK_CPU_FEATURE(supports_crc32, CRC32); + CHECK_CPU_FEATURE(supports_lse, LSE); + CHECK_CPU_FEATURE(supports_aes, AES); if (_cpu == CPU_ARM && model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2, @@ -265,7 +320,7 @@ void VM_Version::initialize() { } } - if (UseCryptoPmullForCRC32 && (!VM_Version::supports_pmull() || !VM_Version::supports_sha3() || !VM_Version::supports_crc32())) { + if (UseCryptoPmullForCRC32 && (!supports_pmull() || !supports_sha3() || !supports_crc32())) { warning("UseCryptoPmullForCRC32 specified, but not supported on this CPU"); FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, false); } @@ -279,48 +334,40 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false); } - if (VM_Version::supports_lse()) { - if (FLAG_IS_DEFAULT(UseLSE)) - FLAG_SET_DEFAULT(UseLSE, true); - } else { - if (UseLSE) { - warning("UseLSE specified, but not supported on this CPU"); - FLAG_SET_DEFAULT(UseLSE, false); - } - } - - if 
(VM_Version::supports_aes()) { - UseAES = UseAES || FLAG_IS_DEFAULT(UseAES); - UseAESIntrinsics = - UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics)); - if (UseAESIntrinsics && !UseAES) { - warning("UseAESIntrinsics enabled, but UseAES not, enabling"); - UseAES = true; + if (supports_aes()) { + if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { + FLAG_SET_DEFAULT(UseAESIntrinsics, true); } if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true); } } else { - if (UseAES) { - warning("AES instructions are not available on this CPU"); - FLAG_SET_DEFAULT(UseAES, false); - } - if (UseAESIntrinsics) { - warning("AES intrinsics are not available on this CPU"); - FLAG_SET_DEFAULT(UseAESIntrinsics, false); - } - if (UseAESCTRIntrinsics) { - warning("AES/CTR intrinsics are not available on this CPU"); - FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); + if (!UseAES) { + if (UseAESIntrinsics) { + warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled."); + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + if (UseAESCTRIntrinsics) { + warning("AES/CTR intrinsics require UseAES flag to be enabled. 
Intrinsics will be disabled."); + FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); + } + } else if (!cpu_supports_aes()) { + if (UseAESIntrinsics) { + warning("AES intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + if (UseAESCTRIntrinsics) { + warning("AES/CTR intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); + } } } - if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { UseCRC32Intrinsics = true; } - if (VM_Version::supports_crc32()) { + if (supports_crc32()) { if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) { FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true); } @@ -337,17 +384,7 @@ void VM_Version::initialize() { UseMD5Intrinsics = true; } - if (VM_Version::supports_sha1() || VM_Version::supports_sha256() || - VM_Version::supports_sha3() || VM_Version::supports_sha512()) { - if (FLAG_IS_DEFAULT(UseSHA)) { - FLAG_SET_DEFAULT(UseSHA, true); - } - } else if (UseSHA) { - warning("SHA instructions are not available on this CPU"); - FLAG_SET_DEFAULT(UseSHA, false); - } - - if (UseSHA && VM_Version::supports_sha1()) { + if (UseSHA && supports_sha1()) { if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { FLAG_SET_DEFAULT(UseSHA1Intrinsics, true); } @@ -356,7 +393,7 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); } - if (UseSHA && VM_Version::supports_sha256()) { + if (UseSHA && supports_sha256()) { if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) { FLAG_SET_DEFAULT(UseSHA256Intrinsics, true); } @@ -365,21 +402,33 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); } - if (UseSHA && VM_Version::supports_sha3()) { - // Auto-enable UseSHA3Intrinsics on hardware with performance benefit. - // Note that the evaluation of UseSHA3Intrinsics shows better performance + if (UseSHA) { + // No need to check supports_sha3(), since a fallback GPR intrinsic implementation is provided. 
+ if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA3Intrinsics, true); + } + } else if (UseSHA3Intrinsics) { + // Matches the documented and tested behavior: the -UseSHA option disables all SHA intrinsics. + warning("UseSHA3Intrinsics requires that UseSHA is enabled."); + FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); + } + + if (UseSHA3Intrinsics && supports_sha3()) { + // Auto-enable UseSIMDForSHA3Intrinsic on hardware with performance benefit. + // Note that the evaluation of SHA3 extension Intrinsics shows better performance // on Apple and Qualcomm silicon but worse performance on Neoverse V1 and N2. if (_cpu == CPU_APPLE || _cpu == CPU_QUALCOMM) { // Apple or Qualcomm silicon - if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) { - FLAG_SET_DEFAULT(UseSHA3Intrinsics, true); + if (FLAG_IS_DEFAULT(UseSIMDForSHA3Intrinsic)) { + FLAG_SET_DEFAULT(UseSIMDForSHA3Intrinsic, true); } } - } else if (UseSHA3Intrinsics && UseSIMDForSHA3Intrinsic) { + } + if (UseSHA3Intrinsics && UseSIMDForSHA3Intrinsic && !supports_sha3()) { warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU."); FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); } - if (UseSHA && VM_Version::supports_sha512()) { + if (UseSHA && supports_sha512()) { if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) { FLAG_SET_DEFAULT(UseSHA512Intrinsics, true); } @@ -388,11 +437,7 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); } - if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA3Intrinsics || UseSHA512Intrinsics)) { - FLAG_SET_DEFAULT(UseSHA, false); - } - - if (VM_Version::supports_pmull()) { + if (supports_pmull()) { if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) { FLAG_SET_DEFAULT(UseGHASHIntrinsics, true); } @@ -443,7 +488,7 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseBlockZeroing, true); } if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) { - FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length()); + 
FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * zva_length()); } } else if (UseBlockZeroing) { if (!FLAG_IS_DEFAULT(UseBlockZeroing)) { @@ -452,11 +497,11 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseBlockZeroing, false); } - if (VM_Version::supports_sve2()) { + if (supports_sve2()) { if (FLAG_IS_DEFAULT(UseSVE)) { FLAG_SET_DEFAULT(UseSVE, 2); } - } else if (VM_Version::supports_sve()) { + } else if (supports_sve()) { if (FLAG_IS_DEFAULT(UseSVE)) { FLAG_SET_DEFAULT(UseSVE, 1); } else if (UseSVE > 1) { @@ -507,7 +552,7 @@ void VM_Version::initialize() { // 1) this code has been built with branch-protection and // 2) the CPU/OS supports it #ifdef __ARM_FEATURE_PAC_DEFAULT - if (!VM_Version::supports_paca()) { + if (!supports_paca()) { // Disable PAC to prevent illegal instruction crashes. warning("ROP-protection specified, but not supported on this CPU. Disabling ROP-protection."); } else { @@ -649,6 +694,43 @@ void VM_Version::initialize() { clear_feature(CPU_SVE); } + if (FLAG_IS_DEFAULT(UseSingleICacheInvalidation) && is_cache_idc_enabled() && is_cache_dic_enabled()) { + FLAG_SET_DEFAULT(UseSingleICacheInvalidation, true); + } + + if (FLAG_IS_DEFAULT(NeoverseN1ICacheErratumMitigation) && has_neoverse_n1_errata_1542419() + && is_cache_idc_enabled() && !is_cache_dic_enabled()) { + if (_ic_ivau_trapped) { + FLAG_SET_DEFAULT(NeoverseN1ICacheErratumMitigation, true); + } else { + log_info(os)("IC IVAU is not trapped; disabling NeoverseN1ICacheErratumMitigation"); + FLAG_SET_DEFAULT(NeoverseN1ICacheErratumMitigation, false); + } + } + + if (NeoverseN1ICacheErratumMitigation) { + if (!has_neoverse_n1_errata_1542419()) { + vm_exit_during_initialization("NeoverseN1ICacheErratumMitigation is set for the CPU not having Neoverse N1 errata 1542419"); + } + // If the user explicitly set the flag, verify the trap is active. 
+ if (!FLAG_IS_DEFAULT(NeoverseN1ICacheErratumMitigation) && !_ic_ivau_trapped) { + vm_exit_during_initialization("NeoverseN1ICacheErratumMitigation is set but IC IVAU is not trapped. " + "The optimization is not safe on this system."); + } + if (FLAG_IS_DEFAULT(UseSingleICacheInvalidation)) { + FLAG_SET_DEFAULT(UseSingleICacheInvalidation, true); + } + + if (!UseSingleICacheInvalidation) { + vm_exit_during_initialization("NeoverseN1ICacheErratumMitigation is set but UseSingleICacheInvalidation is not enabled"); + } + } + + if (UseSingleICacheInvalidation + && (!is_cache_idc_enabled() || (!is_cache_dic_enabled() && !NeoverseN1ICacheErratumMitigation))) { + vm_exit_during_initialization("UseSingleICacheInvalidation is set but neither IDC nor DIC nor NeoverseN1ICacheErratumMitigation is enabled"); + } + // Construct the "features" string stringStream ss(512); ss.print("0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision); diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp index 0213872852b..30f1a5d86ca 100644 --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp @@ -55,6 +55,15 @@ protected: static int _max_supported_sve_vector_length; static bool _rop_protection; static uintptr_t _pac_mask; + // When _prefer_sve_merging_mode_cpy is true, `cpy (imm, zeroing)` is + // implemented as `movi; cpy(imm, merging)`. + static constexpr bool _prefer_sve_merging_mode_cpy = true; + static bool _cache_dic_enabled; + static bool _cache_idc_enabled; + + // IC IVAU trap probe for Neoverse N1 erratum 1542419. + // Set by get_os_cpu_info() on Linux via ic_ivau_probe_linux_aarch64.S. 
+ static bool _ic_ivau_trapped; static SpinWait _spin_wait; @@ -156,7 +165,9 @@ public: /* flags above must follow Linux HWCAP */ \ decl(SVEBITPERM, svebitperm, 27) \ decl(SVE2, sve2, 28) \ - decl(A53MAC, a53mac, 31) + decl(A53MAC, a53mac, 31) \ + decl(ECV, ecv, 32) \ + decl(WFXT, wfxt, 33) enum Feature_Flag { #define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = bit, @@ -188,6 +199,8 @@ public: return (features & BIT_MASK(flag)) != 0; } + static bool cpu_supports_aes() { return supports_feature(_cpu_features, CPU_AES); } + static int cpu_family() { return _cpu; } static int cpu_model() { return _model; } static int cpu_model2() { return _model2; } @@ -242,12 +255,18 @@ public: static bool use_rop_protection() { return _rop_protection; } + static bool prefer_sve_merging_mode_cpy() { return _prefer_sve_merging_mode_cpy; } + // For common 64/128-bit unpredicated vector operations, we may prefer // emitting NEON instructions rather than the corresponding SVE instructions. static bool use_neon_for_vector(int vector_length_in_bytes) { return vector_length_in_bytes <= 16; } + static bool is_cache_dic_enabled() { return _cache_dic_enabled; } + static bool is_cache_idc_enabled() { return _cache_idc_enabled; } + static bool is_ic_ivau_trapped() { return _ic_ivau_trapped; } + static void get_cpu_features_name(void* features_buffer, stringStream& ss); // Returns names of features present in features_set1 but not in features_set2 diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 606275d7666..60a0ef307b5 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -1088,10 +1088,8 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, return clone_base_plus_offset_address(m, mstack, address_visited); } -// Return whether or not this register is ever used as an argument. This -// function is used on startup to build the trampoline stubs in generateOptoStub. -// Registers not mentioned will be killed by the VM call in the trampoline, and -// arguments in those registers not be available to the callee. +#ifdef ASSERT +// Return whether or not this register is ever used as an argument. bool Matcher::can_be_java_arg( int reg ) { if (reg == R_R0_num || reg == R_R1_num || @@ -1102,10 +1100,7 @@ bool Matcher::can_be_java_arg( int reg ) { reg <= R_S13_num) return true; return false; } - -bool Matcher::is_spillable_arg( int reg ) { - return can_be_java_arg(reg); -} +#endif uint Matcher::int_pressure_limit() { @@ -1117,10 +1112,6 @@ uint Matcher::float_pressure_limit() return (FLOATPRESSURE == -1) ? 
30 : FLOATPRESSURE; } -bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { - return false; -} - // Register for DIVI projection of divmodI const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); @@ -4445,6 +4436,18 @@ instruct membar_release_lock() %{ ins_pipe(empty); %} +instruct membar_storeload() %{ + match(MemBarStoreLoad); + ins_cost(4*MEMORY_REF_COST); + + size(4); + format %{ "MEMBAR-storeload" %} + ins_encode %{ + __ membar(MacroAssembler::StoreLoad, noreg); + %} + ins_pipe(long_memory_op); +%} + instruct membar_volatile() %{ match(MemBarVolatile); ins_cost(4*MEMORY_REF_COST); @@ -4468,6 +4471,18 @@ instruct unnecessary_membar_volatile() %{ ins_pipe(empty); %} +instruct membar_full() %{ + match(MemBarFull); + ins_cost(4*MEMORY_REF_COST); + + size(4); + format %{ "MEMBAR-full" %} + ins_encode %{ + __ membar(MacroAssembler::StoreLoad, noreg); + %} + ins_pipe(long_memory_op); +%} + //----------Register Move Instructions----------------------------------------- // Cast Index to Pointer for unsafe natives diff --git a/src/hotspot/cpu/arm/c1_globals_arm.hpp b/src/hotspot/cpu/arm/c1_globals_arm.hpp index 1fe5f1a23ee..9db999e81b3 100644 --- a/src/hotspot/cpu/arm/c1_globals_arm.hpp +++ b/src/hotspot/cpu/arm/c1_globals_arm.hpp @@ -43,7 +43,6 @@ define_pd_global(bool, TieredCompilation, false); define_pd_global(intx, CompileThreshold, 1500 ); define_pd_global(intx, OnStackReplacePercentage, 933 ); -define_pd_global(size_t, NewSizeThreadIncrease, 4*K ); define_pd_global(size_t, InitialCodeCacheSize, 160*K); define_pd_global(size_t, ReservedCodeCacheSize, 32*M ); define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M ); @@ -53,7 +52,6 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(size_t, CodeCacheExpansionSize, 32*K ); define_pd_global(size_t, CodeCacheMinBlockLength, 1); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(bool, NeverActAsServerClassMachine, true); define_pd_global(bool, CICompileOSR, true 
); #endif // COMPILER2 define_pd_global(bool, UseTypeProfile, false); diff --git a/src/hotspot/cpu/arm/c2_globals_arm.hpp b/src/hotspot/cpu/arm/c2_globals_arm.hpp index 0849bd594f0..34da47792ae 100644 --- a/src/hotspot/cpu/arm/c2_globals_arm.hpp +++ b/src/hotspot/cpu/arm/c2_globals_arm.hpp @@ -47,7 +47,6 @@ define_pd_global(intx, ConditionalMoveLimit, 4); // C2 gets to use all the float/double registers define_pd_global(intx, FreqInlineSize, 175); define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment -define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K)); // The default setting 16/16 seems to work best. // (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.) //define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize @@ -94,7 +93,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed -// Ergonomics related flags -define_pd_global(bool, NeverActAsServerClassMachine, false); - #endif // CPU_ARM_C2_GLOBALS_ARM_HPP diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp index 23ecea24eb2..aee407864ee 100644 --- a/src/hotspot/cpu/arm/interp_masm_arm.cpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1210,7 +1210,7 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) { // Sets mdp, blows Rtemp. 
-void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver, bool receiver_can_be_null) { +void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver) { assert_different_registers(mdp, receiver, Rtemp); if (ProfileInterpreter) { @@ -1219,19 +1219,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register rece // If no method data exists, go to profile_continue. test_method_data_pointer(mdp, profile_continue); - Label skip_receiver_profile; - if (receiver_can_be_null) { - Label not_null; - cbnz(receiver, not_null); - // We are making a call. Increment the count for null receiver. - increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp); - b(skip_receiver_profile); - bind(not_null); - } - // Record the receiver type. record_klass_in_profile(receiver, mdp, Rtemp, true); - bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size())); diff --git a/src/hotspot/cpu/arm/interp_masm_arm.hpp b/src/hotspot/cpu/arm/interp_masm_arm.hpp index 530be1c577e..147cd252b2c 100644 --- a/src/hotspot/cpu/arm/interp_masm_arm.hpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -239,8 +239,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_call(Register mdp); // Sets mdp, blows Rtemp. void profile_final_call(Register mdp); // Sets mdp, blows Rtemp. - void profile_virtual_call(Register mdp, Register receiver, // Sets mdp, blows Rtemp. 
- bool receiver_can_be_null = false); + void profile_virtual_call(Register mdp, Register receiver); // Sets mdp, blows Rtemp. void profile_ret(Register mdp, Register return_bci); // Sets mdp, blows R0-R3/R0-R18, Rtemp, LR void profile_null_seen(Register mdp); // Sets mdp. void profile_typecheck(Register mdp, Register klass); // Sets mdp, blows Rtemp. diff --git a/src/hotspot/cpu/arm/matcher_arm.hpp b/src/hotspot/cpu/arm/matcher_arm.hpp index 6c818e1f20d..7978a5b7090 100644 --- a/src/hotspot/cpu/arm/matcher_arm.hpp +++ b/src/hotspot/cpu/arm/matcher_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,6 @@ static bool narrow_klass_use_complex_address() { NOT_LP64(ShouldNotCallThis()); - assert(UseCompressedClassPointers, "only for compressed klass code"); return false; } diff --git a/src/hotspot/cpu/arm/methodHandles_arm.cpp b/src/hotspot/cpu/arm/methodHandles_arm.cpp index 3710fa33f36..2da14d8ffed 100644 --- a/src/hotspot/cpu/arm/methodHandles_arm.cpp +++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp @@ -104,14 +104,13 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe __ andr(temp, temp, (unsigned)java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); __ cmp(temp, ref_kind); __ b(L, eq); - { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); - jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); + const char* msg = ref_kind_to_verify_msg(ref_kind); if (ref_kind == JVM_REF_invokeVirtual || - ref_kind == JVM_REF_invokeSpecial) + ref_kind == JVM_REF_invokeSpecial) { // could do this for all ref_kinds, but would explode assembly code size - trace_method_handle(_masm, buf); - __ stop(buf); + trace_method_handle(_masm, msg); } + __ stop(msg); 
BLOCK_COMMENT("} verify_ref_kind"); __ bind(L); } diff --git a/src/hotspot/cpu/arm/stubDeclarations_arm.hpp b/src/hotspot/cpu/arm/stubDeclarations_arm.hpp index 5f768a205a5..5fb0d4e901f 100644 --- a/src/hotspot/cpu/arm/stubDeclarations_arm.hpp +++ b/src/hotspot/cpu/arm/stubDeclarations_arm.hpp @@ -29,7 +29,8 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 500) \ do_stub(preuniverse, atomic_load_long) \ do_arch_entry(Arm, preuniverse, atomic_load_long, \ @@ -42,7 +43,8 @@ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 9000) \ do_stub(initial, idiv_irem) \ do_arch_entry(Arm, initial, idiv_irem, \ @@ -51,14 +53,16 @@ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 2000) \ #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 22000) \ do_stub(compiler, partial_subtype_check) \ do_arch_entry(Arm, compiler, partial_subtype_check, \ @@ -68,7 +72,8 @@ #define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 22000) \ diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp index a36ad3a0c47..a705b15eff5 100644 --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp @@ -3211,7 +3211,7 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* 
code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -3235,8 +3235,8 @@ class StubGenerator: public StubCodeGenerator { } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) { + StubGenerator g(code, blob_id, stub_data); } // implementation of internal development flag diff --git a/src/hotspot/cpu/arm/stubRoutines_arm.cpp b/src/hotspot/cpu/arm/stubRoutines_arm.cpp index a4f2b5e1bd9..38a9b298562 100644 --- a/src/hotspot/cpu/arm/stubRoutines_arm.cpp +++ b/src/hotspot/cpu/arm/stubRoutines_arm.cpp @@ -32,10 +32,16 @@ #define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); -STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY) #undef DEFINE_ARCH_ENTRY_INIT #undef DEFINE_ARCH_ENTRY address StubRoutines::crc_table_addr() { ShouldNotCallThis(); return nullptr; } address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; } + +#if INCLUDE_CDS +// nothing to do for arm +void StubRoutines::init_AOTAddressTable() { +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/arm/stubRoutines_arm.hpp b/src/hotspot/cpu/arm/stubRoutines_arm.hpp index 45ab10d14f9..29d96d0e653 100644 --- a/src/hotspot/cpu/arm/stubRoutines_arm.hpp +++ b/src/hotspot/cpu/arm/stubRoutines_arm.hpp @@ -55,9 +55,13 @@ class Arm { #define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) -private: - STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, 
DECLARE_ARCH_ENTRY_INIT) +#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address STUB_FIELD_NAME(field_name) [count] ; +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY) + +#undef DECLARE_ARCH_ENTRY_ARRAY #undef DECLARE_ARCH_ENTRY_INIT #undef DECLARE_ARCH_ENTRY @@ -71,8 +75,12 @@ public: #define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) +#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address getter_name(int idx) { return STUB_FIELD_NAME(field_name) [idx] ; } + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY) + +#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY #undef DEFINE_ARCH_ENTRY_GETTER_INIT #undef DEFINE_ARCH_ENTRY_GETTER diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp index da2daffd579..378e01fc1cc 100644 --- a/src/hotspot/cpu/ppc/assembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp @@ -1580,10 +1580,6 @@ class Assembler : public AbstractAssembler { static bool is_nop(int x) { return x == 0x60000000; } - // endgroup opcode for Power6 - static bool is_endgroup(int x) { - return is_ori(x) && inv_ra_field(x) == 1 && inv_rs_field(x) == 1 && inv_d1_field(x) == 0; - } private: @@ -1659,9 +1655,6 @@ class Assembler : public AbstractAssembler { inline void ori_opt( Register d, int ui16); inline void oris_opt(Register d, int ui16); - // endgroup opcode for Power6 - inline void endgroup(); - // count instructions inline void cntlzw( Register a, Register s); inline void cntlzw_( Register a, Register s); diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp 
b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp index bd6f3300606..d349bbc6f87 100644 --- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp @@ -253,8 +253,6 @@ inline void Assembler::mr( Register d, Register s) { Assembler::orr(d, s, inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); } inline void Assembler::oris_opt(Register d, int ui16) { if (ui16!=0) Assembler::oris(d, d, ui16); } -inline void Assembler::endgroup() { Assembler::ori(R1, R1, 0); } - // count instructions inline void Assembler::cntlzw( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); } inline void Assembler::cntlzw_( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); } diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp index 798451446e5..4d7af0e4a71 100644 --- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2025 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -144,7 +144,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register if (len->is_valid()) { stw(len, arrayOopDesc::length_offset_in_bytes(), obj); - } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { + } else if (!UseCompactObjectHeaders) { // Otherwise length is in the class gap. 
store_klass_gap(obj); } diff --git a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp index 77d9acd1cd1..c6fe15aac07 100644 --- a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp +++ b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp @@ -51,8 +51,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M ); define_pd_global(size_t, CodeCacheExpansionSize, 32*K); define_pd_global(size_t, CodeCacheMinBlockLength, 1); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(bool, NeverActAsServerClassMachine, true); -define_pd_global(size_t, NewSizeThreadIncrease, 16*K); define_pd_global(size_t, InitialCodeCacheSize, 160*K); #endif // !COMPILER2 diff --git a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp index caef322d4a1..e4942fa1850 100644 --- a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp +++ b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp @@ -47,7 +47,6 @@ define_pd_global(intx, ConditionalMoveLimit, 3); define_pd_global(intx, FreqInlineSize, 325); define_pd_global(intx, MinJumpTableSize, 10); define_pd_global(intx, InteriorEntryAlignment, 16); -define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K)); define_pd_global(intx, RegisterCostAreaRatio, 16000); define_pd_global(intx, LoopUnrollLimit, 60); define_pd_global(intx, LoopPercentProfileLimit, 10); @@ -91,7 +90,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); define_pd_global(bool, TrapBasedRangeChecks, true); -// Ergonomics related flags -define_pd_global(bool, NeverActAsServerClassMachine, false); - #endif // CPU_PPC_C2_GLOBALS_PPC_HPP diff --git a/src/hotspot/cpu/ppc/disassembler_ppc.cpp b/src/hotspot/cpu/ppc/disassembler_ppc.cpp index fb3cb50cdec..2e16e1a301f 100644 --- a/src/hotspot/cpu/ppc/disassembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/disassembler_ppc.cpp @@ -119,9 +119,6 @@ address Disassembler::decode_instruction0(address here, outputStream * st, addre } else if (instruction == 0xbadbabe) { st->print(".data 0xbadbabe"); next 
= here + Assembler::instr_len(here); - } else if (Assembler::is_endgroup(instruction)) { - st->print("endgroup"); - next = here + Assembler::instr_len(here); } else { next = here; } diff --git a/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp b/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp index f12d25ac611..d149fc33ac3 100644 --- a/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp +++ b/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2020, 2025 SAP SE. All rights reserved. - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -135,10 +135,10 @@ void DowncallLinker::StubGenerator::generate() { bool should_save_return_value = !_needs_return_buffer; RegSpiller out_reg_spiller(_output_registers); - int spill_offset = -1; + int out_spill_offset = -1; if (should_save_return_value) { - spill_offset = frame::native_abi_reg_args_size; + out_spill_offset = frame::native_abi_reg_args_size; // Spill area can be shared with additional out args (>8), // since it is only used after the call. int frame_size_including_spill_area = frame::native_abi_reg_args_size + out_reg_spiller.spill_size_bytes(); @@ -170,6 +170,18 @@ void DowncallLinker::StubGenerator::generate() { ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, _abi._scratch1); + // Need to spill for state capturing runtime call. + // The area spilled into is distinct from the capture state buffer. + RegSpiller in_reg_spiller(out_regs); + int in_spill_offset = -1; + if (_captured_state_mask != 0) { + // The spill area cannot be shared with the out_spill since + // spilling needs to happen before the call. Allocate a new + // region in the stack for this spill space. 
+ in_spill_offset = allocated_frame_size; + allocated_frame_size += in_reg_spiller.spill_size_bytes(); + } + #ifndef PRODUCT LogTarget(Trace, foreign, downcall) lt; if (lt.is_enabled()) { @@ -211,6 +223,21 @@ void DowncallLinker::StubGenerator::generate() { arg_shuffle.generate(_masm, as_VMStorage(callerSP), frame::jit_out_preserve_size, frame::native_abi_minframe_size); __ block_comment("} argument shuffle"); + if (_captured_state_mask != 0) { + assert(in_spill_offset != -1, "must be"); + __ block_comment("{ load initial thread local"); + in_reg_spiller.generate_spill(_masm, in_spill_offset); + + // Copy the contents of the capture state buffer into thread local + __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_pre), R0); + __ ld(R3_ARG1, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER), R1_SP); + __ load_const_optimized(R4_ARG2, _captured_state_mask, R0); + __ call_c(call_target_address); + + in_reg_spiller.generate_fill(_masm, in_spill_offset); + __ block_comment("} load initial thread local"); + } + __ call_c(call_target_address); if (_needs_return_buffer) { @@ -247,16 +274,16 @@ void DowncallLinker::StubGenerator::generate() { __ block_comment("{ save thread local"); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } - __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state), R0); + __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_post), R0); __ ld(R3_ARG1, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER), R1_SP); __ load_const_optimized(R4_ARG2, _captured_state_mask, R0); __ call_c(call_target_address); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ block_comment("} save thread local"); @@ -310,7 
+337,7 @@ void DowncallLinker::StubGenerator::generate() { if (should_save_return_value) { // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, JavaThread::check_special_condition_for_native_trans), R0); @@ -318,7 +345,7 @@ void DowncallLinker::StubGenerator::generate() { __ call_c(call_target_address); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ b(L_after_safepoint_poll); @@ -330,14 +357,14 @@ void DowncallLinker::StubGenerator::generate() { __ bind(L_reguard); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, SharedRuntime::reguard_yellow_pages), R0); __ call_c(call_target_address); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ b(L_after_reguard); diff --git a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp index bb711f2d053..3c05f950d0c 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2025 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -137,10 +137,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address // Return unique id for this frame. 
The id must have a value where we // can distinguish identity and younger/older relationship. null -// represents an invalid (incomparable) frame. +// represents an invalid (incomparable) frame. Should not be called for heap frames. inline intptr_t* frame::id(void) const { // Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing. - return _fp; + return real_fp(); } // Return true if this frame is older (less recent activation) than @@ -319,6 +319,9 @@ inline frame frame::sender(RegisterMap* map) const { StackWatermarkSet::on_iteration(map->thread(), result); } + // Calling frame::id() is currently not supported for heap frames. + assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be"); + return result; } diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp index 82d06f6c685..3692b247989 100644 --- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2018, 2025 SAP SE. All rights reserved. + * Copyright (c) 2018, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -179,6 +179,11 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re __ ld(dst, 0, dst); // Resolve (untagged) jobject. } +void BarrierSetAssembler::try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) { + // Load the oop from the weak handle. 
+ __ ld(obj, 0, obj); +} + void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Register tmp) { BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); assert_different_registers(tmp, R0); @@ -275,11 +280,6 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na return opto_reg; } -void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) { - // Load the oop from the weak handle. - __ ld(obj, 0, obj); -} - #undef __ #define __ _masm-> diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp index d78071f2ee0..8112542d761 100644 --- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2018, 2022 SAP SE. All rights reserved. + * Copyright (c) 2018, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,6 +70,12 @@ public: virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, Register obj, Register tmp, Label& slowpath); + // Can be used in nmethods including native wrappers. + // Attention: obj will only be valid until next safepoint (no SATB barrier). 
+ // TODO: maybe rename to try_peek_weak_handle on all platforms (try: operation may fail, peek: obj is not kept alive) + // (other platforms currently use it for C2 only: try_resolve_weak_handle_in_c2) + virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path); + virtual void barrier_stubs_init() {} virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::stw_instruction_and_data_patch; } @@ -81,8 +87,6 @@ public: #ifdef COMPILER2 OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const; - virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, - Register tmp, Label& slow_path); #endif // COMPILER2 }; diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp index e1f0416d65d..8e99d23cc99 100644 --- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2025, Red Hat, Inc. All rights reserved. - * Copyright (c) 2012, 2025 SAP SE. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -663,17 +663,16 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler __ block_comment("} try_resolve_jobject_in_native (shenandoahgc)"); } -#ifdef COMPILER2 -void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler *masm, Register obj, - Register tmp, Label &slow_path) { - __ block_comment("try_resolve_weak_handle_in_c2 (shenandoahgc) {"); +void ShenandoahBarrierSetAssembler::try_resolve_weak_handle(MacroAssembler *masm, Register obj, + Register tmp, Label &slow_path) { + __ block_comment("try_resolve_weak_handle (shenandoahgc) {"); assert_different_registers(obj, tmp); Label done; // Resolve weak handle using the standard implementation. - BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path); + BarrierSetAssembler::try_resolve_weak_handle(masm, obj, tmp, slow_path); // Check if the reference is null, and if it is, take the fast path. __ cmpdi(CR0, obj, 0); @@ -686,9 +685,8 @@ void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler __ bne(CR0, slow_path); __ bind(done); - __ block_comment("} try_resolve_weak_handle_in_c2 (shenandoahgc)"); + __ block_comment("} try_resolve_weak_handle (shenandoahgc)"); } -#endif // Special shenandoah CAS implementation that handles false negatives due // to concurrent evacuation. That is, the CAS operation is intended to succeed in diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp index 672f8122bcb..58180c49642 100644 --- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp @@ -1,7 +1,7 @@ /* * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. 
- * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -122,9 +122,8 @@ public: virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, Register obj, Register tmp, Label& slowpath); -#ifdef COMPILER2 - virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path); -#endif + + virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path); }; #endif // CPU_PPC_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp index bfa3c87c179..3e74dfb88cb 100644 --- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, 2025 SAP SE. All rights reserved. + * Copyright (c) 2021, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -627,6 +627,19 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R __ block_comment("} try_resolve_jobject_in_native (zgc)"); } +void ZBarrierSetAssembler::try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) { + // Resolve weak handle using the standard implementation. + BarrierSetAssembler::try_resolve_weak_handle(masm, obj, tmp, slow_path); + + // Check if the oop is bad, in which case we need to take the slow path. 
+ __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadMask); + __ andi_(R0, obj, barrier_Relocation::unpatched); + __ bne(CR0, slow_path); + + // Oop is okay, so we uncolor it. + __ srdi(obj, obj, ZPointerLoadShift); +} + #undef __ #ifdef COMPILER1 @@ -950,19 +963,6 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, __ b(*stub->continuation()); } -void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) { - // Resolve weak handle using the standard implementation. - BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path); - - // Check if the oop is bad, in which case we need to take the slow path. - __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadMask); - __ andi_(R0, obj, barrier_Relocation::unpatched); - __ bne(CR0, slow_path); - - // Oop is okay, so we uncolor it. - __ srdi(obj, obj, ZPointerLoadShift); -} - #undef __ #endif // COMPILER2 diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp index e31817370d9..655184cf6a3 100644 --- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, 2022 SAP SE. All rights reserved. + * Copyright (c) 2021, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -72,6 +72,8 @@ public: virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, Register obj, Register tmp, Label& slowpath); + virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path); + virtual void check_oop(MacroAssembler *masm, Register obj, const char* msg); virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; } @@ -108,8 +110,6 @@ public: void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const; void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const; - - void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path); #endif // COMPILER2 void store_barrier_fast(MacroAssembler* masm, diff --git a/src/hotspot/cpu/ppc/icache_ppc.cpp b/src/hotspot/cpu/ppc/icache_ppc.cpp index 05ad3c7a30d..f3d51bad18c 100644 --- a/src/hotspot/cpu/ppc/icache_ppc.cpp +++ b/src/hotspot/cpu/ppc/icache_ppc.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2018 SAP SE. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ */ #include "runtime/icache.hpp" +#include "runtime/vm_version.hpp" // Use inline assembler to implement icache flush. 
int ICache::ppc64_flush_icache(address start, int lines, int magic) { @@ -67,6 +68,9 @@ int ICache::ppc64_flush_icache(address start, int lines, int magic) { void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) { + guarantee(VM_Version::get_icache_line_size() >= ICache::line_size, + "processors with smaller cache line size are no longer supported"); + *flush_icache_stub = (ICache::flush_icache_stub_t)ICache::ppc64_flush_icache; // First call to flush itself. diff --git a/src/hotspot/cpu/ppc/icache_ppc.hpp b/src/hotspot/cpu/ppc/icache_ppc.hpp index d348cad1c72..024f706182a 100644 --- a/src/hotspot/cpu/ppc/icache_ppc.hpp +++ b/src/hotspot/cpu/ppc/icache_ppc.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2013 SAP SE. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,9 +35,8 @@ class ICache : public AbstractICache { public: enum { - // Actually, cache line size is 64, but keeping it as it is to be - // on the safe side on ALL PPC64 implementations. - log2_line_size = 5, + // Cache line size is 128 on all supported PPC64 implementations. + log2_line_size = 7, line_size = 1 << log2_line_size }; diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp index 4ea33ebaf63..275ff92c699 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2025 SAP SE. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -258,7 +258,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_not_taken_branch(Register scratch1, Register scratch2); void profile_call(Register scratch1, Register scratch2); void profile_final_call(Register scratch1, Register scratch2); - void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2, bool receiver_can_be_null); + void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2); void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2); void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2); void profile_switch_default(Register scratch1, Register scratch2); diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index f7bf457f72c..56eade8e533 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2025 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -1340,28 +1340,15 @@ void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register s // Count a virtual call in the bytecodes. void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver, Register Rscratch1, - Register Rscratch2, - bool receiver_can_be_null) { + Register Rscratch2) { if (!ProfileInterpreter) { return; } Label profile_continue; // If no method data exists, go to profile_continue. test_method_data_pointer(profile_continue); - Label skip_receiver_profile; - if (receiver_can_be_null) { - Label not_null; - cmpdi(CR0, Rreceiver, 0); - bne(CR0, not_null); - // We are making a call. Increment the count for null receiver. 
- increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2); - b(skip_receiver_profile); - bind(not_null); - } - // Record the receiver type. record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2); - bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index 986dd335816..5fbcce94029 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2025 SAP SE. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -483,7 +483,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des // variant 3, far cond branch to the next instruction, already patched to nops: // // nop - // endgroup + // nop // SKIP/DEST: // return; @@ -500,7 +500,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) { // Far branch to next instruction: Optimize it by patching nops (produce variant 3). masm.nop(); - masm.endgroup(); + masm.nop(); } else { if (is_bc_far_variant1_at(instruction_addr)) { // variant 1, the 1st instruction contains the destination address: @@ -2800,7 +2800,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register // Check if object matches. 
ld(tmp3, in_bytes(ObjectMonitor::object_offset()), monitor); BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); - bs_asm->try_resolve_weak_handle_in_c2(this, tmp3, tmp2, slow_path); + bs_asm->try_resolve_weak_handle(this, tmp3, tmp2, slow_path); cmpd(CR0, tmp3, obj); bne(CR0, slow_path); @@ -3201,23 +3201,17 @@ Register MacroAssembler::encode_klass_not_null(Register dst, Register src) { void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) { assert(!UseCompactObjectHeaders, "not with compact headers"); - if (UseCompressedClassPointers) { - Register compressedKlass = encode_klass_not_null(ck, klass); - stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop); - } else { - std(klass, oopDesc::klass_offset_in_bytes(), dst_oop); - } + Register compressedKlass = encode_klass_not_null(ck, klass); + stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop); } void MacroAssembler::store_klass_gap(Register dst_oop, Register val) { assert(!UseCompactObjectHeaders, "not with compact headers"); - if (UseCompressedClassPointers) { - if (val == noreg) { - val = R0; - li(val, 0); - } - stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); + if (val == noreg) { + val = R0; + li(val, 0); } + stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); } int MacroAssembler::instr_size_for_decode_klass_not_null() { @@ -3226,17 +3220,13 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() { // Not yet computed? if (computed_size == -1) { - if (!UseCompressedClassPointers) { - computed_size = 0; - } else { - // Determine by scratch emit. - ResourceMark rm; - int code_size = 8 * BytesPerInstWord; - CodeBuffer cb("decode_klass_not_null scratch buffer", code_size, 0); - MacroAssembler* a = new MacroAssembler(&cb); - a->decode_klass_not_null(R11_scratch1); - computed_size = a->offset(); - } + // Determine by scratch emit. 
+ ResourceMark rm; + int code_size = 8 * BytesPerInstWord; + CodeBuffer cb("decode_klass_not_null scratch buffer", code_size, 0); + MacroAssembler* a = new MacroAssembler(&cb); + a->decode_klass_not_null(R11_scratch1); + computed_size = a->offset(); } return computed_size; @@ -3259,18 +3249,14 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { void MacroAssembler::load_klass_no_decode(Register dst, Register src) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(dst, src); - } else if (UseCompressedClassPointers) { - lwz(dst, oopDesc::klass_offset_in_bytes(), src); } else { - ld(dst, oopDesc::klass_offset_in_bytes(), src); + lwz(dst, oopDesc::klass_offset_in_bytes(), src); } } void MacroAssembler::load_klass(Register dst, Register src) { load_klass_no_decode(dst, src); - if (UseCompressedClassPointers) { // also true for UseCompactObjectHeaders - decode_klass_not_null(dst); - } + decode_klass_not_null(dst); } // Loads the obj's Klass* into dst. @@ -3286,18 +3272,13 @@ void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { void MacroAssembler::cmp_klass(ConditionRegister dst, Register obj, Register klass, Register tmp, Register tmp2) { assert_different_registers(obj, klass, tmp); - if (UseCompressedClassPointers) { - if (UseCompactObjectHeaders) { - load_narrow_klass_compact(tmp, obj); - } else { - lwz(tmp, oopDesc::klass_offset_in_bytes(), obj); - } - Register encoded_klass = encode_klass_not_null(tmp2, klass); - cmpw(dst, tmp, encoded_klass); + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp, obj); } else { - ld(tmp, oopDesc::klass_offset_in_bytes(), obj); - cmpd(dst, tmp, klass); + lwz(tmp, oopDesc::klass_offset_in_bytes(), obj); } + Register encoded_klass = encode_klass_not_null(tmp2, klass); + cmpw(dst, tmp, encoded_klass); } void MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register obj1, Register obj2, Register tmp1, Register tmp2) { @@ -3305,14 +3286,10 @@ void 
MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register ob load_narrow_klass_compact(tmp1, obj1); load_narrow_klass_compact(tmp2, obj2); cmpw(dst, tmp1, tmp2); - } else if (UseCompressedClassPointers) { + } else { lwz(tmp1, oopDesc::klass_offset_in_bytes(), obj1); lwz(tmp2, oopDesc::klass_offset_in_bytes(), obj2); cmpw(dst, tmp1, tmp2); - } else { - ld(tmp1, oopDesc::klass_offset_in_bytes(), obj1); - ld(tmp2, oopDesc::klass_offset_in_bytes(), obj2); - cmpd(dst, tmp1, tmp2); } } diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp index 58dec702d7a..4be62098bdf 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp @@ -70,14 +70,6 @@ class MacroAssembler: public Assembler { // Move register if destination register and target register are different inline void mr_if_needed(Register rd, Register rs, bool allow_invalid = false); inline void fmr_if_needed(FloatRegister rd, FloatRegister rs); - // This is dedicated for emitting scheduled mach nodes. For better - // readability of the ad file I put it here. - // Endgroups are not needed if - // - the scheduler is off - // - the scheduler found that there is a natural group end, in that - // case it reduced the size of the instruction used in the test - // yielding 'needed'. - inline void endgroup_if_needed(bool needed); // Memory barriers. inline void membar(int bits); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp index 2b19d84c69c..cdeb8527bea 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2025 SAP SE. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,11 +72,6 @@ inline void MacroAssembler::mr_if_needed(Register rd, Register rs, bool allow_no inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) { if (rs != rd) fmr(rd, rs); } -inline void MacroAssembler::endgroup_if_needed(bool needed) { - if (needed) { - endgroup(); - } -} inline void MacroAssembler::membar(int bits) { // Comment: Usage of elemental_membar(bits) is not recommended for Power 8. @@ -240,13 +235,13 @@ inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) { // Variant 3, far cond branch to the next instruction, already patched to nops: // // nop - // endgroup + // nop // SKIP/DEST: // const int instruction_1 = *(int*)(instruction_addr); const int instruction_2 = *(int*)(instruction_addr + 4); return is_nop(instruction_1) && - is_endgroup(instruction_2); + is_nop(instruction_2); } // set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1, diff --git a/src/hotspot/cpu/ppc/matcher_ppc.hpp b/src/hotspot/cpu/ppc/matcher_ppc.hpp index 2ddbec3e48c..cbe882648b8 100644 --- a/src/hotspot/cpu/ppc/matcher_ppc.hpp +++ b/src/hotspot/cpu/ppc/matcher_ppc.hpp @@ -87,7 +87,6 @@ static bool narrow_klass_use_complex_address() { NOT_LP64(ShouldNotCallThis()); - assert(UseCompressedClassPointers, "only for compressed klass code"); // TODO: PPC port if (MatchDecodeNodes) return true; return false; } diff --git a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp index 45537e0ea96..ae94a9618b5 100644 --- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp +++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp @@ -104,14 +104,13 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe __ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); __ cmpwi(CR1, temp, ref_kind); __ 
beq(CR1, L); - { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); - jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); - if (ref_kind == JVM_REF_invokeVirtual || - ref_kind == JVM_REF_invokeSpecial) - // could do this for all ref_kinds, but would explode assembly code size - trace_method_handle(_masm, buf); - __ stop(buf); + const char* msg = ref_kind_to_verify_msg(ref_kind); + if (ref_kind == JVM_REF_invokeVirtual || + ref_kind == JVM_REF_invokeSpecial) { + // could do this for all ref_kinds, but would explode assembly code size + trace_method_handle(_masm, msg); } + __ stop(msg); BLOCK_COMMENT("} verify_ref_kind"); __ BIND(L); } diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index 4cb9f8820a0..f3d33b4305d 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -2412,10 +2412,8 @@ bool Matcher::is_generic_vector(MachOper* opnd) { return false; } -// Return whether or not this register is ever used as an argument. This -// function is used on startup to build the trampoline stubs in generateOptoStub. -// Registers not mentioned will be killed by the VM call in the trampoline, and -// arguments in those registers not be available to the callee. +#ifdef ASSERT +// Return whether or not this register is ever used as an argument. bool Matcher::can_be_java_arg(int reg) { // We must include the virtual halves in order to get STDs and LDs // instead of STWs and LWs in the trampoline stubs. @@ -2447,10 +2445,7 @@ bool Matcher::can_be_java_arg(int reg) { return false; } - -bool Matcher::is_spillable_arg(int reg) { - return can_be_java_arg(reg); -} +#endif uint Matcher::int_pressure_limit() { @@ -2462,10 +2457,6 @@ uint Matcher::float_pressure_limit() return (FLOATPRESSURE == -1) ? 28 : FLOATPRESSURE; } -bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { - return false; -} - // Register for DIVI projection of divmodI. 
const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); @@ -3715,13 +3706,6 @@ frame %{ // Compiled code's Frame Pointer. frame_pointer(R1); // R1_SP - // Interpreter stores its frame pointer in a register which is - // stored to the stack by I2CAdaptors. I2CAdaptors convert from - // interpreted java to compiled java. - // - // R14_state holds pointer to caller's cInterpreter. - interpreter_frame_pointer(R14); // R14_state - stack_alignment(frame::alignment_in_bytes); // Number of outgoing stack slots killed above the @@ -6339,36 +6323,8 @@ instruct loadConD_Ex(regD dst, immD src) %{ // Prefetch instructions. // Must be safe to execute with invalid address (cannot fault). -// Special prefetch versions which use the dcbz instruction. -instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{ - match(PrefetchAllocation (AddP mem src)); - predicate(AllocatePrefetchStyle == 3); - ins_cost(MEMORY_REF_COST); - - format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %} - size(4); - ins_encode %{ - __ dcbz($src$$Register, $mem$$base$$Register); - %} - ins_pipe(pipe_class_memory); -%} - -instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{ - match(PrefetchAllocation mem); - predicate(AllocatePrefetchStyle == 3); - ins_cost(MEMORY_REF_COST); - - format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %} - size(4); - ins_encode %{ - __ dcbz($mem$$base$$Register); - %} - ins_pipe(pipe_class_memory); -%} - instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{ match(PrefetchAllocation (AddP mem src)); - predicate(AllocatePrefetchStyle != 3); ins_cost(MEMORY_REF_COST); format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %} @@ -6381,7 +6337,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{ instruct prefetch_alloc_no_offset(indirectMemory mem) %{ match(PrefetchAllocation mem); - predicate(AllocatePrefetchStyle != 3); ins_cost(MEMORY_REF_COST); format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %} 
@@ -7175,6 +7130,18 @@ instruct membar_release_lock() %{ ins_pipe(pipe_class_default); %} +instruct membar_storeload() %{ + match(MemBarStoreLoad); + ins_cost(4*MEMORY_REF_COST); + + format %{ "MEMBAR-store-load" %} + size(4); + ins_encode %{ + __ fence(); + %} + ins_pipe(pipe_class_default); +%} + instruct membar_volatile() %{ match(MemBarVolatile); ins_cost(4*MEMORY_REF_COST); @@ -7217,6 +7184,18 @@ instruct membar_volatile() %{ // ins_pipe(pipe_class_default); //%} +instruct membar_full() %{ + match(MemBarFull); + ins_cost(4*MEMORY_REF_COST); + + format %{ "MEMBAR-full" %} + size(4); + ins_encode %{ + __ fence(); + %} + ins_pipe(pipe_class_default); +%} + instruct membar_CPUOrder() %{ match(MemBarCPUOrder); ins_cost(0); @@ -10324,7 +10303,7 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{ ins_variable_size_depending_on_alignment(true); - format %{ "cmovI $crx, $dst, $src" %} + format %{ "CMOVI $crx, $dst, $src" %} size(8); ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) ); ins_pipe(pipe_class_default); @@ -10337,7 +10316,7 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{ ins_variable_size_depending_on_alignment(true); - format %{ "cmovI $crx, $dst, $src" %} + format %{ "CMOVI $crx, $dst, $src" %} size(8); ins_encode( enc_cmove_bso_reg(dst, crx, src) ); ins_pipe(pipe_class_default); @@ -10349,7 +10328,7 @@ instruct cmovI_bso_reg_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, regD src) %{ effect(DEF dst, USE crx, USE src); predicate(false); - format %{ "CmovI $dst, $crx, $src \t// postalloc expanded" %} + format %{ "CMOVI $dst, $crx, $src \t// postalloc expanded" %} postalloc_expand %{ // // replaces @@ -10499,7 +10478,7 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{ ins_variable_size_depending_on_alignment(true); - format %{ "cmovL $crx, $dst, $src" %} + format %{ "CMOVL $crx, $dst, $src" %} size(8); ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) ); 
ins_pipe(pipe_class_default); @@ -10512,7 +10491,7 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{ ins_variable_size_depending_on_alignment(true); - format %{ "cmovL $crx, $dst, $src" %} + format %{ "CMOVL $crx, $dst, $src" %} size(8); ins_encode( enc_cmove_bso_reg(dst, crx, src) ); ins_pipe(pipe_class_default); @@ -10524,7 +10503,7 @@ instruct cmovL_bso_reg_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, regD src) %{ effect(DEF dst, USE crx, USE src); predicate(false); - format %{ "CmovL $dst, $crx, $src \t// postalloc expanded" %} + format %{ "CMOVL $dst, $crx, $src \t// postalloc expanded" %} postalloc_expand %{ // // replaces @@ -10725,9 +10704,9 @@ instruct convF2HF_reg_reg(iRegIdst dst, regF src, regF tmp) %{ effect(TEMP tmp); ins_cost(3 * DEFAULT_COST); size(12); - format %{ "xscvdphp $tmp, $src\t# convert to half precision\n\t" - "mffprd $dst, $tmp\t# move result from $tmp to $dst\n\t" - "extsh $dst, $dst\t# make it a proper short" + format %{ "XSCVDPHP $tmp, $src\t# convert to half precision\n\t" + "MFFPRD $dst, $tmp\t# move result from $tmp to $dst\n\t" + "EXTSH $dst, $dst\t# make it a proper short" %} ins_encode %{ __ f2hf($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister); @@ -10739,8 +10718,8 @@ instruct convHF2F_reg_reg(regF dst, iRegIsrc src) %{ match(Set dst (ConvHF2F src)); ins_cost(2 * DEFAULT_COST); size(8); - format %{ "mtfprd $dst, $src\t# move source from $src to $dst\n\t" - "xscvhpdp $dst, $dst\t# convert from half precision" + format %{ "MTFPRD $dst, $src\t# move source from $src to $dst\n\t" + "XSCVHPDP $dst, $dst\t# convert from half precision" %} ins_encode %{ __ hf2f($dst$$FloatRegister, $src$$Register); @@ -11138,7 +11117,7 @@ instruct cmov_bns_less(flagsReg crx) %{ ins_variable_size_depending_on_alignment(true); - format %{ "cmov $crx" %} + format %{ "CMOV $crx" %} size(12); ins_encode %{ Label done; @@ -11166,7 +11145,7 @@ instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{ match(Set crx (CmpF 
src1 src2)); ins_cost(DEFAULT_COST+BRANCH_COST); - format %{ "CmpF $crx, $src1, $src2 \t// postalloc expanded" %} + format %{ "CMPF $crx, $src1, $src2 \t// postalloc expanded" %} postalloc_expand %{ // // replaces @@ -12324,7 +12303,7 @@ instruct minF(regF dst, regF src1, regF src2) %{ predicate(PowerArchitecturePPC64 >= 9); ins_cost(DEFAULT_COST); - format %{ "MinF $dst, $src1, $src2" %} + format %{ "XSMINJDP $dst, $src1, $src2\t// MinF" %} size(4); ins_encode %{ __ xsminjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr()); @@ -12337,7 +12316,7 @@ instruct minD(regD dst, regD src1, regD src2) %{ predicate(PowerArchitecturePPC64 >= 9); ins_cost(DEFAULT_COST); - format %{ "MinD $dst, $src1, $src2" %} + format %{ "XSMINJDP $dst, $src1, $src2\t// MinD" %} size(4); ins_encode %{ __ xsminjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr()); @@ -12350,7 +12329,7 @@ instruct maxF(regF dst, regF src1, regF src2) %{ predicate(PowerArchitecturePPC64 >= 9); ins_cost(DEFAULT_COST); - format %{ "MaxF $dst, $src1, $src2" %} + format %{ "XSMAXJDP $dst, $src1, $src2\t// MaxF" %} size(4); ins_encode %{ __ xsmaxjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr()); @@ -12363,7 +12342,7 @@ instruct maxD(regD dst, regD src1, regD src2) %{ predicate(PowerArchitecturePPC64 >= 9); ins_cost(DEFAULT_COST); - format %{ "MaxD $dst, $src1, $src2" %} + format %{ "XSMAXJDP $dst, $src1, $src2\t// MaxD" %} size(4); ins_encode %{ __ xsmaxjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr()); @@ -13893,7 +13872,7 @@ instruct vfma2D_neg2(vecX dst, vecX src1, vecX src2) %{ instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{ match(Set cr0 (OverflowAddL op1 op2)); - format %{ "add_ $op1, $op2\t# overflow check long" %} + format %{ "ADD_ $op1, $op2\t# overflow check long" %} size(12); ins_encode %{ 
__ li(R0, 0); @@ -13906,7 +13885,7 @@ instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{ instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{ match(Set cr0 (OverflowSubL op1 op2)); - format %{ "subfo_ R0, $op2, $op1\t# overflow check long" %} + format %{ "SUBFO_ R0, $op2, $op1\t# overflow check long" %} size(12); ins_encode %{ __ li(R0, 0); @@ -13919,7 +13898,7 @@ instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{ instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{ match(Set cr0 (OverflowSubL zero op2)); - format %{ "nego_ R0, $op2\t# overflow check long" %} + format %{ "NEGO_ R0, $op2\t# overflow check long" %} size(12); ins_encode %{ __ li(R0, 0); @@ -13932,7 +13911,7 @@ instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{ instruct overflowMulL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{ match(Set cr0 (OverflowMulL op1 op2)); - format %{ "mulldo_ R0, $op1, $op2\t# overflow check long" %} + format %{ "MULLDO_ R0, $op1, $op2\t# overflow check long" %} size(12); ins_encode %{ __ li(R0, 0); @@ -14309,7 +14288,7 @@ instruct ForwardExceptionjmp() match(ForwardException); ins_cost(CALL_COST); - format %{ "Jmp forward_exception_stub" %} + format %{ "JMP forward_exception_stub" %} ins_encode %{ __ set_inst_mark(); __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); @@ -14337,7 +14316,7 @@ instruct RethrowException() %{ match(Rethrow); ins_cost(CALL_COST); - format %{ "Jmp rethrow_stub" %} + format %{ "JMP rethrow_stub" %} ins_encode %{ __ set_inst_mark(); __ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type); @@ -14379,20 +14358,6 @@ instruct tlsLoadP(threadRegP dst) %{ //---Some PPC specific nodes--------------------------------------------------- -// Stop a group. 
-instruct endGroup() %{ - ins_cost(0); - - ins_is_nop(true); - - format %{ "End Bundle (ori r1, r1, 0)" %} - size(4); - ins_encode %{ - __ endgroup(); - %} - ins_pipe(pipe_class_default); -%} - // Nop instructions instruct fxNop() %{ diff --git a/src/hotspot/cpu/ppc/registerMap_ppc.cpp b/src/hotspot/cpu/ppc/registerMap_ppc.cpp new file mode 100644 index 00000000000..2e7f8af89d3 --- /dev/null +++ b/src/hotspot/cpu/ppc/registerMap_ppc.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "runtime/registerMap.hpp" + +address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const { + if (base_reg->is_VectorRegister()) { + // Not all physical slots belonging to a VectorRegister have corresponding + // valid VMReg locations in the RegisterMap. + // (See RegisterSaver::push_frame_reg_args_and_save_live_registers.) 
+ // However, the slots are always saved to the stack in a contiguous region + // of memory so we can calculate the address of the upper slots by + // offsetting from the base address. + assert(base_reg->is_concrete(), "must pass base reg"); + address base_location = location(base_reg, nullptr); + if (base_location != nullptr) { + intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size; + return base_location + offset_in_bytes; + } else { + return nullptr; + } + } else { + return location(base_reg->next(slot_idx), nullptr); + } +} diff --git a/src/hotspot/cpu/ppc/registerMap_ppc.hpp b/src/hotspot/cpu/ppc/registerMap_ppc.hpp index 01eb642107c..607c712d10f 100644 --- a/src/hotspot/cpu/ppc/registerMap_ppc.hpp +++ b/src/hotspot/cpu/ppc/registerMap_ppc.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2013 SAP SE. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,9 +35,7 @@ // Since there is none, we just return null. address pd_location(VMReg reg) const { return nullptr; } - address pd_location(VMReg base_reg, int slot_idx) const { - return location(base_reg->next(slot_idx), nullptr); - } + address pd_location(VMReg base_reg, int slot_idx) const; // no PD state to clear or copy: void pd_clear() {} diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 5260ed978ff..53644210415 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -102,7 +102,7 @@ class RegisterSaver { // During deoptimization only the result registers need to be restored // all the other values have already been extracted. 
- static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes); + static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors); // Constants and data structures: @@ -349,7 +349,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble } // Note that generate_oop_map in the following loop is only used for the - // polling_page_vectors_safepoint_handler_blob. + // polling_page_vectors_safepoint_handler_blob and the deopt_blob. // The order in which the vector contents are stored depends on Endianess and // the utilized instructions (PowerArchitecturePPC64). assert(is_aligned(offset, StackAlignmentInBytes), "should be"); @@ -361,6 +361,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble __ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP); // Note: The contents were read in the same order (see loadV16_Power9 node in ppc.ad). + // RegisterMap::pd_location only uses the first VMReg for each VectorRegister. if (generate_oop_map) { map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg); @@ -380,6 +381,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble __ stxvd2x(as_VectorRegister(reg_num)->to_vsr(), R31, R1_SP); } // Note: The contents were read in the same order (see loadV16_Power8 / loadV16_Power9 node in ppc.ad). + // RegisterMap::pd_location only uses the first VMReg for each VectorRegister. if (generate_oop_map) { VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg; map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr); @@ -566,10 +568,14 @@ void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm } // Restore the registers that might be holding a result. 
-void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) { +void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors) { const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / sizeof(RegisterSaver::LiveRegType); - const int register_save_size = regstosave_num * reg_size; // VS registers not relevant here. + const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) / + sizeof(RegisterSaver::LiveRegType)) + : 0; + const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size; + const int register_save_offset = frame_size_in_bytes - register_save_size; // restore all result registers (ints and floats) @@ -598,7 +604,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_siz offset += reg_size; } - assert(offset == frame_size_in_bytes, "consistency check"); + assert(offset == frame_size_in_bytes - (save_vectors ? vecregstosave_num * vec_reg_size : 0), "consistency check"); } // Is vector's size (in bytes) bigger than a size saved by default? @@ -2909,7 +2915,8 @@ void SharedRuntime::generate_deopt_blob() { map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, &first_frame_size_in_bytes, /*generate_oop_map=*/ true, - RegisterSaver::return_pc_is_lr); + RegisterSaver::return_pc_is_lr, + /*save_vectors*/ SuperwordUseVSX); assert(map != nullptr, "OopMap must have been created"); __ li(exec_mode_reg, Deoptimization::Unpack_deopt); @@ -2943,7 +2950,8 @@ void SharedRuntime::generate_deopt_blob() { RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, &first_frame_size_in_bytes, /*generate_oop_map=*/ false, - RegisterSaver::return_pc_is_pre_saved); + RegisterSaver::return_pc_is_pre_saved, + /*save_vectors*/ SuperwordUseVSX); // Deopt during an exception. Save exec mode for unpack_frames. 
__ li(exec_mode_reg, Deoptimization::Unpack_exception); @@ -2958,7 +2966,8 @@ void SharedRuntime::generate_deopt_blob() { RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, &first_frame_size_in_bytes, /*generate_oop_map=*/ false, - RegisterSaver::return_pc_is_pre_saved); + RegisterSaver::return_pc_is_pre_saved, + /*save_vectors*/ SuperwordUseVSX); __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); #endif @@ -2984,7 +2993,7 @@ void SharedRuntime::generate_deopt_blob() { // Restore only the result registers that have been saved // by save_volatile_registers(...). - RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); + RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes, /*save_vectors*/ SuperwordUseVSX); // reload the exec mode from the UnrollBlock (it might have changed) __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); diff --git a/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp b/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp index be51afe42a4..41b8b71486d 100644 --- a/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp +++ b/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp @@ -29,35 +29,40 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 0) \ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 20000) \ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 2000) \ #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 24000) \ #define 
STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 24000) \ diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index e48778a8b9f..f528587a8bb 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -5095,7 +5095,7 @@ void generate_lookup_secondary_supers_table_stub() { } public: - StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData *stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -5119,7 +5119,7 @@ void generate_lookup_secondary_supers_table_stub() { } }; -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData *stub_data) { + StubGenerator g(code, blob_id, stub_data); } diff --git a/src/hotspot/cpu/ppc/stubRoutines_ppc_64.cpp b/src/hotspot/cpu/ppc/stubRoutines_ppc_64.cpp index 914c5a17a19..3b7ee66348a 100644 --- a/src/hotspot/cpu/ppc/stubRoutines_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/stubRoutines_ppc_64.cpp @@ -183,3 +183,9 @@ address StubRoutines::ppc::generate_crc_constants(juint reverse_poly) { return consts; } + +#if INCLUDE_CDS +// nothing to do for ppc +void StubRoutines::init_AOTAddressTable() { +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp index 8a3af748fa1..37f780535b4 100644 --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp @@ -3489,7 +3489,7 @@ void TemplateTable::invokevirtual(int byte_no) { // Get receiver klass. 
__ load_klass_check_null_throw(Rrecv_klass, Rrecv, R11_scratch1); __ verify_klass_ptr(Rrecv_klass); - __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false); + __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2); generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1); } @@ -3596,7 +3596,7 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass, // Non-final callc case. __ bind(LnotFinal); __ lhz(Rindex, in_bytes(ResolvedMethodEntry::table_index_offset()), Rcache); - __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false); + __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch); generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch); } @@ -3664,7 +3664,7 @@ void TemplateTable::invokeinterface(int byte_no) { __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2, L_no_such_interface, /*return_method=*/false); - __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false); + __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2); // Find entry point to call. diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.cpp b/src/hotspot/cpu/ppc/vm_version_ppc.cpp index 75feb389298..3e3b1103c86 100644 --- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp +++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2025 SAP SE. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "asm/assembler.inline.hpp" #include "asm/macroAssembler.inline.hpp" +#include "compiler/compilerDefinitions.inline.hpp" #include "compiler/disassembler.hpp" #include "jvm.h" #include "memory/resourceArea.hpp" @@ -105,7 +106,7 @@ void VM_Version::initialize() { if (PowerArchitecturePPC64 >= 9) { // Performance is good since Power9. - if (FLAG_IS_DEFAULT(SuperwordUseVSX)) { + if (FLAG_IS_DEFAULT(SuperwordUseVSX) && CompilerConfig::is_c2_enabled()) { FLAG_SET_ERGO(SuperwordUseVSX, true); } } @@ -310,11 +311,6 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); } - if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { - FLAG_SET_DEFAULT(UseSHA, false); - } - - #ifdef COMPILER2 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) { UseSquareToLenIntrinsic = true; @@ -475,19 +471,12 @@ void VM_Version::print_features() { void VM_Version::determine_features() { #if defined(ABI_ELFv2) - // 1 InstWord per call for the blr instruction. - const int code_size = (num_features+1+2*1)*BytesPerInstWord; + const int code_size = (num_features + 1 /*blr*/) * BytesPerInstWord; #else - // 7 InstWords for each call (function descriptor + blr instruction). - const int code_size = (num_features+1+2*7)*BytesPerInstWord; + const int code_size = (num_features + 1 /*blr*/ + 6 /* fd */) * BytesPerInstWord; #endif int features = 0; - // create test area - enum { BUFFER_SIZE = 2*4*K }; // Needs to be >=2* max cache line size (cache line size can't exceed min page size). - char test_area[BUFFER_SIZE]; - char *mid_of_test_area = &test_area[BUFFER_SIZE>>1]; - // Allocate space for the code. ResourceMark rm; CodeBuffer cb("detect_cpu_features", code_size, 0); @@ -497,20 +486,13 @@ void VM_Version::determine_features() { _features = VM_Version::all_features_m; // Emit code. 
- void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry(); + void (*test)() = (void(*)())(void *)a->function_entry(); uint32_t *code = (uint32_t *)a->pc(); - // Keep R3_ARG1 unmodified, it contains &field (see below). - // Keep R4_ARG2 unmodified, it contains offset = 0 (see below). a->mfdscr(R0); a->darn(R7); a->brw(R5, R6); a->blr(); - // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it. - void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry(); - a->dcbz(R3_ARG1); // R3_ARG1 = addr - a->blr(); - uint32_t *code_end = (uint32_t *)a->pc(); a->flush(); _features = VM_Version::unknown_m; @@ -522,18 +504,9 @@ void VM_Version::determine_features() { Disassembler::decode((u_char*)code, (u_char*)code_end, tty); } - // Measure cache line size. - memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF. - (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle. - int count = 0; // count zeroed bytes - for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++; - guarantee(is_power_of_2(count), "cache line size needs to be a power of 2"); - _L1_data_cache_line_size = count; - // Execute code. Illegal instructions will be replaced by 0 in the signal handler. VM_Version::_is_determine_features_test_running = true; - // We must align the first argument to 16 bytes because of the lqarx check. - (*test)(align_up((address)mid_of_test_area, 16), 0); + (*test)(); VM_Version::_is_determine_features_test_running = false; // determine which instructions are legal. @@ -550,6 +523,10 @@ void VM_Version::determine_features() { } _features = features; + + _L1_data_cache_line_size = VM_Version::get_dcache_line_size(); + assert(_L1_data_cache_line_size >= DEFAULT_CACHE_LINE_SIZE, + "processors with smaller cache line size are no longer supported"); } // Power 8: Configure Data Stream Control Register. 
diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.hpp b/src/hotspot/cpu/ppc/vm_version_ppc.hpp index 11dce83bed0..0f4eb3593a3 100644 --- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp +++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2025 SAP SE. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,6 +81,9 @@ public: static uint64_t _dscr_val; static void initialize_cpu_information(void); + + static int get_dcache_line_size(); + static int get_icache_line_size(); }; #endif // CPU_PPC_VM_VERSION_PPC_HPP diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp index 819d6c05654..8aced227a06 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -196,12 +196,9 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe if (UseCompactObjectHeaders) { __ load_narrow_klass_compact(tmp, src); __ load_narrow_klass_compact(t0, dst); - } else if (UseCompressedClassPointers) { + } else { __ lwu(tmp, Address(src, oopDesc::klass_offset_in_bytes())); __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); - } else { - __ ld(tmp, Address(src, oopDesc::klass_offset_in_bytes())); - __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes())); } __ bne(tmp, t0, *stub->entry(), /* is_far */ true); } else { @@ -243,37 +240,6 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe } } -void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) { - assert(default_type != nullptr, "null default_type!"); - BasicType basic_type = default_type->element_type()->basic_type(); - if (basic_type == T_ARRAY) { basic_type = T_OBJECT; } - if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { - // Sanity check the known type with the incoming class. For the - // primitive case the types must match exactly with src.klass and - // dst.klass each exactly matching the default type. For the - // object array case, if no type check is needed then either the - // dst type is exactly the expected type and the src type is a - // subtype which we can't check or src is the same array as dst - // but not necessarily exactly of type default_type. 
- Label known_ok, halt; - __ mov_metadata(tmp, default_type->constant_encoding()); - if (UseCompressedClassPointers) { - __ encode_klass_not_null(tmp); - } - - if (basic_type != T_OBJECT) { - __ cmp_klass_compressed(dst, tmp, t0, halt, false); - __ cmp_klass_compressed(src, tmp, t0, known_ok, true); - } else { - __ cmp_klass_compressed(dst, tmp, t0, known_ok, true); - __ beq(src, dst, known_ok); - } - __ bind(halt); - __ stop("incorrect type information in arraycopy"); - __ bind(known_ok); - } -} - void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { ciArrayKlass *default_type = op->expected_type(); Register src = op->src()->as_register(); @@ -304,7 +270,28 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { } #ifdef ASSERT - arraycopy_assert(src, dst, tmp, default_type, flags); + if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { + // Sanity check the known type with the incoming class. For the + // primitive case the types must match exactly with src.klass and + // dst.klass each exactly matching the default type. For the + // object array case, if no type check is needed then either the + // dst type is exactly the expected type and the src type is a + // subtype which we can't check or src is the same array as dst + // but not necessarily exactly of type default_type. 
+ Label known_ok, halt; + __ mov_metadata(tmp, default_type->constant_encoding()); + + if (basic_type != T_OBJECT) { + __ cmp_klass_bne(dst, tmp, t0, t1, halt); + __ cmp_klass_beq(src, tmp, t0, t1, known_ok); + } else { + __ cmp_klass_beq(dst, tmp, t0, t1, known_ok); + __ beq(src, dst, known_ok); + } + __ bind(halt); + __ stop("incorrect type information in arraycopy"); + __ bind(known_ok); + } #endif #ifndef PRODUCT diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.hpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.hpp index 06a0f248ca6..b5452f3e4cd 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.hpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -39,7 +39,6 @@ void arraycopy_type_check(Register src, Register src_pos, Register length, Register dst, Register dst_pos, Register tmp, CodeStub *stub, BasicType basic_type, int flags); - void arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags); void arraycopy_prepare_params(Register src, Register src_pos, Register length, Register dst, Register dst_pos, BasicType basic_type); void arraycopy_checkcast_prepare_params(Register src, Register src_pos, Register length, diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index 63e2fd015d7..29e5d86d0cc 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. 
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -55,20 +55,6 @@ const Register SHIFT_count = x10; // where count for shift operations must be #define __ _masm-> -static void select_different_registers(Register preserve, - Register extra, - Register &tmp1, - Register &tmp2) { - if (tmp1 == preserve) { - assert_different_registers(tmp1, tmp2, extra); - tmp1 = extra; - } else if (tmp2 == preserve) { - assert_different_registers(tmp1, tmp2, extra); - tmp2 = extra; - } - assert_different_registers(preserve, tmp1, tmp2); -} - static void select_different_registers(Register preserve, Register extra, Register &tmp1, @@ -1155,12 +1141,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L } else if (obj == klass_RInfo) { klass_RInfo = dst; } - if (k->is_loaded() && !UseCompressedClassPointers) { - select_different_registers(obj, dst, k_RInfo, klass_RInfo); - } else { - Rtmp1 = op->tmp3()->as_register(); - select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); - } + Rtmp1 = op->tmp3()->as_register(); + select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); assert_different_registers(obj, k_RInfo, klass_RInfo); diff --git a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp index 88565d9136f..f290708a231 100644 --- a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -1073,9 +1073,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedClassPointers) { - tmp3 = new_register(objectType); - } + tmp3 = new_register(objectType); __ checkcast(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), info_for_exception, patching_info, stub, @@ -1094,9 +1092,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { } obj.load_item(); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedClassPointers) { - tmp3 = new_register(objectType); - } + tmp3 = new_register(objectType); __ instanceof(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index aeb077ba0a0..abcc070b253 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -92,12 +92,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register // This assumes that all prototype bits fitr in an int32_t mv(tmp1, checked_cast(markWord::prototype().value())); sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes())); - if (UseCompressedClassPointers) { // Take care not to kill klass - encode_klass_not_null(tmp1, klass, tmp2); - sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes())); - } else { - sd(klass, Address(obj, oopDesc::klass_offset_in_bytes())); - } + encode_klass_not_null(tmp1, klass, tmp2); + sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes())); } if (len->is_valid()) { @@ -108,7 +104,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register // Clear gap/first 4 bytes following the length field. sw(zr, Address(obj, base_offset)); } - } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { + } else if (!UseCompactObjectHeaders) { store_klass_gap(obj, zr); } } diff --git a/src/hotspot/cpu/riscv/c1_globals_riscv.hpp b/src/hotspot/cpu/riscv/c1_globals_riscv.hpp index b15bb5c23c3..b940393f063 100644 --- a/src/hotspot/cpu/riscv/c1_globals_riscv.hpp +++ b/src/hotspot/cpu/riscv/c1_globals_riscv.hpp @@ -42,7 +42,6 @@ define_pd_global(bool, TieredCompilation, false); define_pd_global(intx, CompileThreshold, 1500 ); define_pd_global(intx, OnStackReplacePercentage, 933 ); -define_pd_global(intx, NewSizeThreadIncrease, 4*K ); define_pd_global(size_t, InitialCodeCacheSize, 160*K); define_pd_global(size_t, ReservedCodeCacheSize, 32*M ); define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M ); @@ -52,7 +51,6 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(size_t, CodeCacheExpansionSize, 32*K ); define_pd_global(size_t, CodeCacheMinBlockLength, 1); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(bool, CICompileOSR, true ); #endif // !COMPILER2 
define_pd_global(bool, UseTypeProfile, false); diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index 72a90ddde1f..0d06fd469de 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -1175,8 +1175,7 @@ void C2_MacroAssembler::string_compare_long_same_encoding(Register result, Regis Label TAIL_CHECK, TAIL, NEXT_WORD, DIFFERENCE; const int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); const int minCharsInWord = isLL ? wordSize : wordSize / 2; @@ -1269,8 +1268,7 @@ void C2_MacroAssembler::string_compare_long_different_encoding(Register result, Label TAIL, NEXT_WORD, DIFFERENCE; const int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); Register strL = isLU ? str1 : str2; Register strU = isLU ? str2 : str1; @@ -1485,8 +1483,7 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2, int length_offset = arrayOopDesc::length_offset_in_bytes(); int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); Register cnt1 = tmp3; Register cnt2 = tmp1; // cnt2 only used in array length compare @@ -1611,8 +1608,7 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2, int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 
8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); BLOCK_COMMENT("string_equals {"); @@ -2699,8 +2695,7 @@ void C2_MacroAssembler::arrays_equals_v(Register a1, Register a2, Register resul int length_offset = arrayOopDesc::length_offset_in_bytes(); int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); BLOCK_COMMENT("arrays_equals_v {"); diff --git a/src/hotspot/cpu/riscv/c2_globals_riscv.hpp b/src/hotspot/cpu/riscv/c2_globals_riscv.hpp index 648c24ee98b..73ef97939ed 100644 --- a/src/hotspot/cpu/riscv/c2_globals_riscv.hpp +++ b/src/hotspot/cpu/riscv/c2_globals_riscv.hpp @@ -47,7 +47,6 @@ define_pd_global(intx, ConditionalMoveLimit, 3); define_pd_global(intx, FreqInlineSize, 325); define_pd_global(intx, MinJumpTableSize, 10); define_pd_global(intx, InteriorEntryAlignment, 16); -define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K)); define_pd_global(intx, LoopUnrollLimit, 60); define_pd_global(intx, LoopPercentProfileLimit, 10); // InitialCodeCacheSize derived from specjbb2000 run. @@ -75,9 +74,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M ); define_pd_global(size_t, CodeCacheMinBlockLength, 6); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -// Ergonomics related flags -define_pd_global(bool, NeverActAsServerClassMachine, false); - define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed. 
#endif // CPU_RISCV_C2_GLOBALS_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp b/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp index cc685645ec5..f9d7ce78ff0 100644 --- a/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp +++ b/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -140,10 +140,10 @@ void DowncallLinker::StubGenerator::generate() { bool should_save_return_value = !_needs_return_buffer; RegSpiller out_reg_spiller(_output_registers); - int spill_offset = -1; + int out_spill_offset = -1; if (should_save_return_value) { - spill_offset = 0; + out_spill_offset = 0; // spill area can be shared with shadow space and out args, // since they are only used before the call, // and spill area is only used after. @@ -168,6 +168,9 @@ void DowncallLinker::StubGenerator::generate() { // FP-> | | // |---------------------| = frame_bottom_offset = frame_size // | (optional) | + // | in_reg_spiller area | + // |---------------------| + // | (optional) | // | capture state buf | // |---------------------| = StubLocations::CAPTURED_STATE_BUFFER // | (optional) | @@ -181,6 +184,18 @@ void DowncallLinker::StubGenerator::generate() { GrowableArray out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs); ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, shuffle_reg); + // Need to spill for state capturing runtime call. + // The area spilled into is distinct from the capture state buffer. + RegSpiller in_reg_spiller(out_regs); + int in_spill_offset = -1; + if (_captured_state_mask != 0) { + // The spill area cannot be shared with the out_spill since + // spilling needs to happen before the call. 
Allocate a new + // region in the stack for this spill space. + in_spill_offset = allocated_frame_size; + allocated_frame_size += in_reg_spiller.spill_size_bytes(); + } + #ifndef PRODUCT LogTarget(Trace, foreign, downcall) lt; if (lt.is_enabled()) { @@ -226,6 +241,20 @@ void DowncallLinker::StubGenerator::generate() { arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes); __ block_comment("} argument shuffle"); + if (_captured_state_mask != 0) { + assert(in_spill_offset != -1, "must be"); + __ block_comment("{ load initial thread local"); + in_reg_spiller.generate_spill(_masm, in_spill_offset); + + // Copy the contents of the capture state buffer into thread local + __ ld(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); + __ mv(c_rarg1, _captured_state_mask); + __ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_pre)); + + in_reg_spiller.generate_fill(_masm, in_spill_offset); + __ block_comment("} load initial thread local"); + } + __ jalr(as_Register(locs.get(StubLocations::TARGET_ADDRESS))); // this call is assumed not to have killed xthread @@ -254,15 +283,15 @@ void DowncallLinker::StubGenerator::generate() { __ block_comment("{ save thread local"); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ ld(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); __ mv(c_rarg1, _captured_state_mask); - __ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state)); + __ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_post)); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ block_comment("} save thread local"); @@ -319,7 +348,7 @@ void DowncallLinker::StubGenerator::generate() { if (should_save_return_value) { // Need to save the native result registers around any 
runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ mv(c_rarg0, xthread); @@ -327,7 +356,7 @@ void DowncallLinker::StubGenerator::generate() { __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ j(L_after_safepoint_poll); __ block_comment("} L_safepoint_poll_slow_path"); @@ -339,13 +368,13 @@ void DowncallLinker::StubGenerator::generate() { if (should_save_return_value) { // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); } __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); } __ j(L_after_reguard); diff --git a/src/hotspot/cpu/riscv/frame_riscv.inline.hpp b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp index 51a203c548c..d1841a347e9 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -236,8 +236,8 @@ inline bool frame::equal(frame other) const { // Return unique id for this frame. The id must have a value where we can distinguish // identity and younger/older relationship. null represents an invalid (incomparable) -// frame. 
-inline intptr_t* frame::id(void) const { return unextended_sp(); } +// frame. Should not be called for heap frames. +inline intptr_t* frame::id(void) const { return real_fp(); } // Return true if the frame is older (less recent activation) than the frame represented by id inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id"); @@ -398,6 +398,9 @@ frame frame::sender(RegisterMap* map) const { StackWatermarkSet::on_iteration(map->thread(), result); } + // Calling frame::id() is currently not supported for heap frames. + assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be"); + return result; } diff --git a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp index d94bf428fd2..9eb546a1888 100644 --- a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -56,8 +56,10 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d } } -void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp) { - assert_different_registers(obj, tmp); +void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2) { + precond(tmp1 != noreg); + precond(tmp2 != noreg); + assert_different_registers(obj, tmp1, tmp2); BarrierSet* bs = BarrierSet::barrier_set(); assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); @@ -65,17 +67,17 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob assert(CardTable::dirty_card_val() == 0, "must be"); - __ load_byte_map_base(tmp); - __ add(tmp, obj, tmp); + __ load_byte_map_base(tmp1); + __ add(tmp1, obj, tmp1); if (UseCondCardMark) { Label L_already_dirty; - __ lbu(t1, Address(tmp)); - __ beqz(t1, L_already_dirty); - __ sb(zr, Address(tmp)); + __ lbu(tmp2, Address(tmp1)); + __ beqz(tmp2, L_already_dirty); + __ sb(zr, Address(tmp1)); __ bind(L_already_dirty); } else { - __ sb(zr, Address(tmp)); + __ sb(zr, Address(tmp1)); } } @@ -119,10 +121,10 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS if (needs_post_barrier) { // flatten object address if needed if (!precise || dst.offset() == 0) { - store_check(masm, dst.base(), tmp3); + store_check(masm, dst.base(), tmp1, tmp2); } else { __ la(tmp3, dst); - store_check(masm, tmp3, t0); + store_check(masm, tmp3, tmp1, tmp2); } } } diff --git a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.hpp index 6f6e9065103..1576f0a6dd8 100644 --- a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -31,7 +31,7 @@ class CardTableBarrierSetAssembler: public BarrierSetAssembler { protected: - void store_check(MacroAssembler* masm, Register obj, Register tmp); + void store_check(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2); virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, RegSet saved_regs) {} diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp index 744590bec2b..804c2072ba5 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -1040,26 +1040,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) { void InterpreterMacroAssembler::profile_virtual_call(Register receiver, - Register mdp, - bool receiver_can_be_null) { + Register mdp) { if (ProfileInterpreter) { Label profile_continue; // If no method data exists, go to profile_continue. test_method_data_pointer(mdp, profile_continue); - Label skip_receiver_profile; - if (receiver_can_be_null) { - Label not_null; - // We are making a call. Increment the count for null receiver. - increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); - j(skip_receiver_profile); - bind(not_null); - } - // Record the receiver type. 
profile_receiver_type(receiver, mdp, 0); - bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp index 59cc76b022f..df86f0dc532 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -274,8 +274,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_not_taken_branch(Register mdp); void profile_call(Register mdp); void profile_final_call(Register mdp); - void profile_virtual_call(Register receiver, Register mdp, - bool receiver_can_be_null = false); + void profile_virtual_call(Register receiver, Register mdp); void profile_ret(Register return_bci, Register mdp); void profile_null_seen(Register mdp); void profile_typecheck(Register mdp, Register klass); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp index 4f5e7afc166..0e32c602d95 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -49,6 +49,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/integerCast.hpp" #include "utilities/powerOfTwo.hpp" #ifdef COMPILER2 #include "opto/compile.hpp" @@ -1947,14 +1948,12 @@ void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp) { } } -void MacroAssembler::push_reg(Register Rs) -{ +void MacroAssembler::push_reg(Register Rs) { subi(esp, esp, wordSize); sd(Rs, Address(esp, 0)); } -void MacroAssembler::pop_reg(Register Rd) -{ +void MacroAssembler::pop_reg(Register Rd) { ld(Rd, Address(esp, 0)); addi(esp, esp, wordSize); } @@ -1973,7 +1972,11 @@ int MacroAssembler::bitset_to_regs(unsigned int bitset, unsigned char* regs) { // Push integer registers in the bitset supplied. Don't push sp. // Return the number of words pushed -int MacroAssembler::push_reg(unsigned int bitset, Register stack) { +int MacroAssembler::push_reg(RegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); DEBUG_ONLY(int words_pushed = 0;) unsigned char regs[32]; int count = bitset_to_regs(bitset, regs); @@ -1993,7 +1996,11 @@ int MacroAssembler::push_reg(unsigned int bitset, Register stack) { return count; } -int MacroAssembler::pop_reg(unsigned int bitset, Register stack) { +int MacroAssembler::pop_reg(RegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); DEBUG_ONLY(int words_popped = 0;) unsigned char regs[32]; int count = bitset_to_regs(bitset, regs); @@ -2015,7 +2022,11 @@ int MacroAssembler::pop_reg(unsigned int bitset, Register stack) { // Push floating-point registers in the bitset supplied. 
// Return the number of words pushed -int MacroAssembler::push_fp(unsigned int bitset, Register stack) { +int MacroAssembler::push_fp(FloatRegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); DEBUG_ONLY(int words_pushed = 0;) unsigned char regs[32]; int count = bitset_to_regs(bitset, regs); @@ -2035,7 +2046,11 @@ int MacroAssembler::push_fp(unsigned int bitset, Register stack) { return count; } -int MacroAssembler::pop_fp(unsigned int bitset, Register stack) { +int MacroAssembler::pop_fp(FloatRegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); DEBUG_ONLY(int words_popped = 0;) unsigned char regs[32]; int count = bitset_to_regs(bitset, regs); @@ -2721,7 +2736,11 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, #ifdef COMPILER2 // Push vector registers in the bitset supplied. // Return the number of words pushed -int MacroAssembler::push_v(unsigned int bitset, Register stack) { +int MacroAssembler::push_v(VectorRegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); int vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); // Scan bitset to accumulate register pairs @@ -2736,7 +2755,11 @@ int MacroAssembler::push_v(unsigned int bitset, Register stack) { return count * vector_size_in_bytes / wordSize; } -int MacroAssembler::pop_v(unsigned int bitset, Register stack) { +int MacroAssembler::pop_v(VectorRegSet regset, Register stack) { + if (regset.bits() == 0) { + return 0; + } + auto bitset = integer_cast(regset.bits()); int vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); // Scan bitset to accumulate register pairs @@ -3511,19 +3534,30 @@ void MacroAssembler::orptr(Address adr, RegisterOrConstant src, Register tmp1, R sd(tmp1, adr); } -void MacroAssembler::cmp_klass_compressed(Register oop, Register 
trial_klass, Register tmp, Label &L, bool equal) { +void MacroAssembler::cmp_klass_beq(Register obj, Register klass, + Register tmp1, Register tmp2, + Label &L, bool is_far) { + assert_different_registers(obj, klass, tmp1, tmp2); if (UseCompactObjectHeaders) { - load_narrow_klass_compact(tmp, oop); - } else if (UseCompressedClassPointers) { - lwu(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + load_narrow_klass_compact(tmp1, obj); } else { - ld(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + lwu(tmp1, Address(obj, oopDesc::klass_offset_in_bytes())); } - if (equal) { - beq(trial_klass, tmp, L); + decode_klass_not_null(tmp1, tmp2); + beq(klass, tmp1, L, is_far); +} + +void MacroAssembler::cmp_klass_bne(Register obj, Register klass, + Register tmp1, Register tmp2, + Label &L, bool is_far) { + assert_different_registers(obj, klass, tmp1, tmp2); + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp1, obj); } else { - bne(trial_klass, tmp, L); + lwu(tmp1, Address(obj, oopDesc::klass_offset_in_bytes())); } + decode_klass_not_null(tmp1, tmp2); + bne(klass, tmp1, L, is_far); } // Move an oop into a register. @@ -3741,11 +3775,9 @@ void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(dst, src); decode_klass_not_null(dst, tmp); - } else if (UseCompressedClassPointers) { + } else { lwu(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst, tmp); - } else { - ld(dst, Address(src, oopDesc::klass_offset_in_bytes())); } } @@ -3753,20 +3785,15 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { // FIXME: Should this be a store release? concurrent gcs assumes // klass length is valid if klass field is not null. 
assert(!UseCompactObjectHeaders, "not with compact headers"); - if (UseCompressedClassPointers) { - encode_klass_not_null(src, tmp); - sw(src, Address(dst, oopDesc::klass_offset_in_bytes())); - } else { - sd(src, Address(dst, oopDesc::klass_offset_in_bytes())); - } + encode_klass_not_null(src, tmp); + sw(src, Address(dst, oopDesc::klass_offset_in_bytes())); + } void MacroAssembler::store_klass_gap(Register dst, Register src) { assert(!UseCompactObjectHeaders, "not with compact headers"); - if (UseCompressedClassPointers) { - // Store to klass gap in destination - sw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); - } + // Store to klass gap in destination + sw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); } void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { @@ -3775,7 +3802,6 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { } void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register tmp) { - assert(UseCompressedClassPointers, "should only be used for compressed headers"); assert_different_registers(dst, tmp); assert_different_registers(src, tmp); @@ -3806,8 +3832,6 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { } void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register tmp) { - assert(UseCompressedClassPointers, "should only be used for compressed headers"); - if (CompressedKlassPointers::base() == nullptr) { if (CompressedKlassPointers::shift() != 0) { srli(dst, src, CompressedKlassPointers::shift()); @@ -5337,7 +5361,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { } void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int index = oop_recorder()->find_index(k); @@ -5417,12 +5440,9 @@ int MacroAssembler::ic_check(int end_alignment) { if 
(UseCompactObjectHeaders) { load_narrow_klass_compact(tmp1, receiver); lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset())); - } else if (UseCompressedClassPointers) { + } else { lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes())); lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset())); - } else { - ld(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes())); - ld(tmp2, Address(data, CompiledICData::speculated_klass_offset())); } Label ic_hit; @@ -5543,13 +5563,6 @@ void MacroAssembler::decrementw(const Address dst, int32_t value, Register tmp1, sw(tmp1, adr); } -void MacroAssembler::cmpptr(Register src1, const Address &src2, Label& equal, Register tmp) { - assert_different_registers(src1, tmp); - assert(src2.getMode() == Address::literal, "must be applied to a literal address"); - ld(tmp, src2); - beq(src1, tmp, equal); -} - void MacroAssembler::load_method_holder_cld(Register result, Register method) { load_method_holder(result, method); ld(result, Address(result, InstanceKlass::class_loader_data_offset())); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp index f5e985c28a2..4cc55e7ae23 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -198,7 +198,12 @@ class MacroAssembler: public Assembler { void load_klass(Register dst, Register src, Register tmp = t0); void load_narrow_klass_compact(Register dst, Register src); void store_klass(Register dst, Register src, Register tmp = t0); - void cmp_klass_compressed(Register oop, Register trial_klass, Register tmp, Label &L, bool equal); + void cmp_klass_beq(Register obj, Register klass, + Register tmp1, Register tmp2, + Label &L, bool is_far = false); + void cmp_klass_bne(Register obj, Register klass, + Register tmp1, Register tmp2, + Label &L, bool is_far = false); void encode_klass_not_null(Register r, Register tmp = t0); void decode_klass_not_null(Register r, Register tmp = t0); @@ -813,15 +818,6 @@ class MacroAssembler: public Assembler { void double_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); private: - int push_reg(unsigned int bitset, Register stack); - int pop_reg(unsigned int bitset, Register stack); - int push_fp(unsigned int bitset, Register stack); - int pop_fp(unsigned int bitset, Register stack); -#ifdef COMPILER2 - int push_v(unsigned int bitset, Register stack); - int pop_v(unsigned int bitset, Register stack); -#endif // COMPILER2 - // The signed 20-bit upper imm can materialize at most negative 0xF...F80000000, two G. // The following signed 12-bit imm can at max subtract 0x800, two K, from that previously loaded two G. 
bool is_valid_32bit_offset(int64_t x) { @@ -839,15 +835,19 @@ private: } public: + // Stack push and pop individual 64 bit registers void push_reg(Register Rs); void pop_reg(Register Rd); - void push_reg(RegSet regs, Register stack) { if (regs.bits()) push_reg(regs.bits(), stack); } - void pop_reg(RegSet regs, Register stack) { if (regs.bits()) pop_reg(regs.bits(), stack); } - void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); } - void pop_fp(FloatRegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); } + + int push_reg(RegSet regset, Register stack); + int pop_reg(RegSet regset, Register stack); + + int push_fp(FloatRegSet regset, Register stack); + int pop_fp(FloatRegSet regset, Register stack); + #ifdef COMPILER2 - void push_v(VectorRegSet regs, Register stack) { if (regs.bits()) push_v(regs.bits(), stack); } - void pop_v(VectorRegSet regs, Register stack) { if (regs.bits()) pop_v(regs.bits(), stack); } + int push_v(VectorRegSet regset, Register stack); + int pop_v(VectorRegSet regset, Register stack); #endif // COMPILER2 // Push and pop everything that might be clobbered by a native @@ -1348,9 +1348,8 @@ public: void decrement(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1); void decrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1); - void cmpptr(Register src1, const Address &src2, Label& equal, Register tmp = t0); - void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = nullptr, Label* L_slow_path = nullptr); + void load_method_holder_cld(Register result, Register method); void load_method_holder(Register holder, Register method); diff --git a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp index d770999df96..e80dedf58ed 100644 --- a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp +++ b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp @@ -72,17 +72,22 @@ void 
MethodHandles::verify_klass(MacroAssembler* _masm, InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id); Klass* klass = vmClasses::klass_at(klass_id); Register temp1 = t1; - Register temp2 = t0; // used by MacroAssembler::cmpptr + Register temp2 = t0; Label L_ok, L_bad; BLOCK_COMMENT("verify_klass {"); __ verify_oop(obj); __ beqz(obj, L_bad); + __ push_reg(RegSet::of(temp1, temp2), sp); __ load_klass(temp1, obj, temp2); - __ cmpptr(temp1, ExternalAddress((address) klass_addr), L_ok); + __ ld(temp2, ExternalAddress((address)klass_addr)); + __ beq(temp1, temp2, L_ok); + intptr_t super_check_offset = klass->super_check_offset(); __ ld(temp1, Address(temp1, super_check_offset)); - __ cmpptr(temp1, ExternalAddress((address) klass_addr), L_ok); + __ ld(temp2, ExternalAddress((address)klass_addr)); + __ beq(temp1, temp2, L_ok); + __ pop_reg(RegSet::of(temp1, temp2), sp); __ bind(L_bad); __ stop(error_message); diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index 730dd68dd88..e236d03e6d2 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. // Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -1801,13 +1801,8 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { assert_cond(st != nullptr); st->print_cr("# MachUEPNode"); - if (UseCompressedClassPointers) { - st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); - st->print_cr("\tlwu t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass"); - } else { - st->print_cr("\tld t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); - st->print_cr("\tld t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass"); - } + st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); + st->print_cr("\tlwu t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass"); st->print_cr("\tbeq t1, t2, ic_hit"); st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check"); st->print_cr("\tic_hit:"); @@ -2060,11 +2055,8 @@ bool Matcher::is_generic_vector(MachOper* opnd) { return false; } +#ifdef ASSERT // Return whether or not this register is ever used as an argument. -// This function is used on startup to build the trampoline stubs in -// generateOptoStub. Registers not mentioned will be killed by the VM -// call in the trampoline, and arguments in those registers not be -// available to the callee. bool Matcher::can_be_java_arg(int reg) { return @@ -2085,11 +2077,7 @@ bool Matcher::can_be_java_arg(int reg) reg == F16_num || reg == F16_H_num || reg == F17_num || reg == F17_H_num; } - -bool Matcher::is_spillable_arg(int reg) -{ - return can_be_java_arg(reg); -} +#endif uint Matcher::int_pressure_limit() { @@ -2118,10 +2106,6 @@ uint Matcher::float_pressure_limit() return (FLOATPRESSURE == -1) ? 
_FLOAT_REG_mask.size() : FLOATPRESSURE; } -bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { - return false; -} - const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); return RegMask::EMPTY; @@ -2274,7 +2258,7 @@ encode %{ } else if (rtype == relocInfo::metadata_type) { __ mov_metadata(dst_reg, (Metadata*)con); } else { - assert(rtype == relocInfo::none, "unexpected reloc type"); + assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type"); __ mv(dst_reg, $src$$constant); } } @@ -2559,11 +2543,6 @@ frame %{ // Compiled code's Frame Pointer frame_pointer(R2); - // Interpreter stores its frame pointer in a register which is - // stored to the stack by I2CAdaptors. - // I2CAdaptors convert from interpreted java to compiled java. - interpreter_frame_pointer(R8); - // Stack alignment requirement stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) @@ -8168,6 +8147,22 @@ instruct unnecessary_membar_rvtso() %{ ins_pipe(real_empty); %} +instruct membar_storeload_rvtso() %{ + predicate(UseZtso); + match(MemBarStoreLoad); + ins_cost(VOLATILE_REF_COST); + + format %{ "#@membar_storeload_rvtso\n\t" + "fence w, r"%} + + ins_encode %{ + __ block_comment("membar_storeload_rvtso"); + __ membar(MacroAssembler::StoreLoad); + %} + + ins_pipe(pipe_slow); +%} + instruct membar_volatile_rvtso() %{ predicate(UseZtso); match(MemBarVolatile); @@ -8198,6 +8193,22 @@ instruct unnecessary_membar_volatile_rvtso() %{ ins_pipe(real_empty); %} +instruct membar_full_rvtso() %{ + predicate(UseZtso); + match(MemBarFull); + ins_cost(VOLATILE_REF_COST); + + format %{ "#@membar_full_rvtso\n\t" + "fence rw, rw" %} + + ins_encode %{ + __ block_comment("membar_full_rvtso"); + __ membar(MacroAssembler::AnyAny); + %} + + ins_pipe(pipe_slow); +%} + // RVWMO instruct membar_aqcuire_rvwmo() %{ @@ -8247,6 +8258,22 @@ instruct membar_storestore_rvwmo() %{ ins_pipe(pipe_serial); %} +instruct membar_storeload_rvwmo() %{ + 
predicate(!UseZtso); + match(MemBarStoreLoad); + ins_cost(VOLATILE_REF_COST); + + format %{ "#@membar_storeload_rvwmo\n\t" + "fence w, r"%} + + ins_encode %{ + __ block_comment("membar_storeload_rvwmo"); + __ membar(MacroAssembler::StoreLoad); + %} + + ins_pipe(pipe_serial); +%} + instruct membar_volatile_rvwmo() %{ predicate(!UseZtso); match(MemBarVolatile); @@ -8291,6 +8318,22 @@ instruct unnecessary_membar_volatile_rvwmo() %{ ins_pipe(real_empty); %} +instruct membar_full_rvwmo() %{ + predicate(!UseZtso); + match(MemBarFull); + ins_cost(VOLATILE_REF_COST); + + format %{ "#@membar_full_rvwmo\n\t" + "fence rw, rw" %} + + ins_encode %{ + __ block_comment("membar_full_rvwmo"); + __ membar(MacroAssembler::AnyAny); + %} + + ins_pipe(pipe_serial); +%} + instruct spin_wait() %{ predicate(UseZihintpause); match(OnSpinWait); diff --git a/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp b/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp index f977d759d20..890e354fd27 100644 --- a/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp +++ b/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp @@ -29,28 +29,32 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 0) \ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 10000) \ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 2000) \ #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 45000) \ do_stub(compiler, compare_long_string_LL) \ do_arch_entry(riscv, compiler, compare_long_string_LL, \ @@ -81,7 +85,8 @@ #define 
STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 20000 ZGC_ONLY(+10000)) \ do_stub(final, copy_byte_f) \ do_arch_entry(riscv, final, copy_byte_f, copy_byte_f, \ diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index 127ac9f6951..4656b5c0d41 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2025, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -3070,8 +3070,7 @@ class StubGenerator: public StubCodeGenerator { const Register tmp = x30, tmpLval = x12; int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); #ifdef ASSERT if (AvoidUnalignedAccesses) { @@ -3128,8 +3127,7 @@ class StubGenerator: public StubCodeGenerator { tmp1 = x28, tmp2 = x29, tmp3 = x30, tmp4 = x12; int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE); - assert((base_offset % (UseCompactObjectHeaders ? 4 : - (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be"); + assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be"); Register strU = isLU ? str2 : str1, strL = isLU ? 
str1 : str2, @@ -7350,7 +7348,7 @@ static const int64_t right_3_bits = right_n_bits(3); } public: - StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -7374,6 +7372,6 @@ static const int64_t right_3_bits = right_n_bits(3); } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) { + StubGenerator g(code, blob_id, stub_data); } diff --git a/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp b/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp index 2aac95d71fa..b7f69eff9fa 100644 --- a/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp @@ -42,8 +42,12 @@ #define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); -STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) +#define DEFINE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) [count] ; +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY) + +#undef DEFINE_ARCH_ENTRY_ARRAY #undef DEFINE_ARCH_ENTRY_INIT #undef DEFINE_ARCH_ENTRY @@ -501,3 +505,9 @@ ATTRIBUTE_ALIGNED(4096) juint StubRoutines::riscv::_crc_table[] = 0x751997d0UL, 0x00000001UL, 0xccaa009eUL, 0x00000000UL, }; + +#if INCLUDE_CDS +// nothing to do for riscv +void StubRoutines::init_AOTAddressTable() { +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp index 
2c4e7210413..ec67a338052 100644 --- a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp +++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp @@ -61,9 +61,13 @@ class riscv { #define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) -private: - STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) +#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address STUB_FIELD_NAME(field_name) [count] ; +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY) + +#undef DECLARE_ARCH_ENTRY_ARRAY #undef DECLARE_ARCH_ENTRY_INIT #undef DECLARE_ARCH_ENTRY @@ -79,8 +83,12 @@ private: #define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) +#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address getter_name(int idx) { return STUB_FIELD_NAME(field_name) [idx] ; } + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY) + +#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY #undef DEFINE_ARCH_ENTRY_GETTER_INIT #undef DEFINE_ARCH_ENTRY_GETTER diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp index 36f0864da0b..3a6415d52bd 100644 --- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp +++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp @@ -420,11 +420,6 @@ void VM_Version::c2_initialize() { FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); } - // UseSHA - if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA3Intrinsics || UseSHA512Intrinsics)) { - FLAG_SET_DEFAULT(UseSHA, false); - } - // AES if (UseZvkn) { UseAES 
= UseAES || FLAG_IS_DEFAULT(UseAES); diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp index 03c843efc69..11a88dfedd7 100644 --- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp +++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp @@ -55,7 +55,7 @@ class VM_Version : public Abstract_VM_Version { public: RVFeatureValue(const char* pretty, int linux_bit_num, bool fstring) : - _pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit(linux_bit_num)) { + _pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit(linux_bit_num)) { } virtual void enable_feature(int64_t value = 0) = 0; virtual void disable_feature() = 0; diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp index 93d6051aa76..e1d8d062c23 100644 --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -2251,9 +2251,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // but not necessarily exactly of type default_type. NearLabel known_ok, halt; metadata2reg(default_type->constant_encoding(), tmp); - if (UseCompressedClassPointers) { - __ encode_klass_not_null(tmp); - } + __ encode_klass_not_null(tmp); if (basic_type != T_OBJECT) { __ cmp_klass(tmp, dst, Z_R1_scratch); @@ -2540,13 +2538,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L // Get object class. // Not a safepoint as obj null check happens earlier. 
if (op->fast_check()) { - if (UseCompressedClassPointers) { - __ load_klass(klass_RInfo, obj); - __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target); - } else { - __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); - __ branch_optimized(Assembler::bcondNotEqual, *failure_target); - } + __ load_klass(klass_RInfo, obj); + __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target); // Successful cast, fall through to profile or jump. } else { bool need_slow_path = !k->is_loaded() || diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp index 993c1a1b552..813143938f9 100644 --- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -107,10 +107,10 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register } if (len->is_valid()) { - // Length will be in the klass gap, if one exists. + // Length will be in the klass gap. z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes())); - } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { - store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops. + } else if (!UseCompactObjectHeaders) { + store_klass_gap(Rzero, obj); // Zero klass gap. 
} } diff --git a/src/hotspot/cpu/s390/c1_globals_s390.hpp b/src/hotspot/cpu/s390/c1_globals_s390.hpp index 25e46cd1509..64cc239800a 100644 --- a/src/hotspot/cpu/s390/c1_globals_s390.hpp +++ b/src/hotspot/cpu/s390/c1_globals_s390.hpp @@ -51,8 +51,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M); define_pd_global(size_t, CodeCacheExpansionSize, 32*K); define_pd_global(size_t, CodeCacheMinBlockLength, 1); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(bool, NeverActAsServerClassMachine, true); -define_pd_global(size_t, NewSizeThreadIncrease, 16*K); define_pd_global(size_t, InitialCodeCacheSize, 160*K); #endif // !COMPILER2 diff --git a/src/hotspot/cpu/s390/c2_globals_s390.hpp b/src/hotspot/cpu/s390/c2_globals_s390.hpp index 125b317588d..eee3a8588c3 100644 --- a/src/hotspot/cpu/s390/c2_globals_s390.hpp +++ b/src/hotspot/cpu/s390/c2_globals_s390.hpp @@ -46,7 +46,6 @@ define_pd_global(intx, OnStackReplacePercentage, 140); define_pd_global(intx, ConditionalMoveLimit, 4); define_pd_global(intx, FreqInlineSize, 325); define_pd_global(intx, InteriorEntryAlignment, 4); -define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K)); define_pd_global(intx, RegisterCostAreaRatio, 12000); define_pd_global(intx, LoopUnrollLimit, 60); define_pd_global(intx, LoopPercentProfileLimit, 10); @@ -79,7 +78,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture. -// Ergonomics related flags -define_pd_global(bool, NeverActAsServerClassMachine, false); - #endif // CPU_S390_C2_GLOBALS_S390_HPP diff --git a/src/hotspot/cpu/s390/downcallLinker_s390.cpp b/src/hotspot/cpu/s390/downcallLinker_s390.cpp index ccd8002da37..f1c41d05b5c 100644 --- a/src/hotspot/cpu/s390/downcallLinker_s390.cpp +++ b/src/hotspot/cpu/s390/downcallLinker_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -129,7 +129,7 @@ void DowncallLinker::StubGenerator::generate() { assert(!_needs_return_buffer, "unexpected needs_return_buffer"); RegSpiller out_reg_spiller(_output_registers); - int spill_offset = allocated_frame_size; + int out_spill_offset = allocated_frame_size; allocated_frame_size += BytesPerWord; StubLocations locs; @@ -153,6 +153,18 @@ void DowncallLinker::StubGenerator::generate() { GrowableArray out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs); ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, _abi._scratch1); + // Need to spill for state capturing runtime call. + // The area spilled into is distinct from the capture state buffer. + RegSpiller in_reg_spiller(out_regs); + int in_spill_offset = -1; + if (_captured_state_mask != 0) { + // The spill area cannot be shared with the out_spill since + // spilling needs to happen before the call. Allocate a new + // region in the stack for this spill space. 
+ in_spill_offset = allocated_frame_size; + allocated_frame_size += in_reg_spiller.spill_size_bytes(); + } + #ifndef PRODUCT LogTarget(Trace, foreign, downcall) lt; if (lt.is_enabled()) { @@ -192,6 +204,21 @@ void DowncallLinker::StubGenerator::generate() { arg_shuffle.generate(_masm, shuffle_reg, frame::z_jit_out_preserve_size, _abi._shadow_space_bytes); __ block_comment("} argument_shuffle"); + if (_captured_state_mask != 0) { + assert(in_spill_offset != -1, "must be"); + __ block_comment("{ load initial thread local"); + in_reg_spiller.generate_spill(_masm, in_spill_offset); + + // Copy the contents of the capture state buffer into thread local + __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_pre)); + __ z_lg(Z_ARG1, Address(Z_SP, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); + __ load_const_optimized(Z_ARG2, _captured_state_mask); + __ call(call_target_address); + + in_reg_spiller.generate_fill(_masm, in_spill_offset); + __ block_comment("} load initial thread local"); + } + __ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS))); ////////////////////////////////////////////////////////////////////////////// @@ -199,14 +226,14 @@ void DowncallLinker::StubGenerator::generate() { if (_captured_state_mask != 0) { __ block_comment("save_thread_local {"); - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); - __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state)); + __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_post)); __ z_lg(Z_ARG1, Address(Z_SP, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); __ load_const_optimized(Z_ARG2, _captured_state_mask); __ call(call_target_address); - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); __ block_comment("} 
save_thread_local"); } @@ -259,13 +286,13 @@ void DowncallLinker::StubGenerator::generate() { __ bind(L_safepoint_poll_slow_path); // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, JavaThread::check_special_condition_for_native_trans)); __ z_lgr(Z_ARG1, Z_thread); __ call(call_target_address); - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); __ z_bru(L_after_safepoint_poll); __ block_comment("} L_safepoint_poll_slow_path"); @@ -275,12 +302,12 @@ void DowncallLinker::StubGenerator::generate() { __ bind(L_reguard); // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + out_reg_spiller.generate_spill(_masm, out_spill_offset); __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, SharedRuntime::reguard_yellow_pages)); __ call(call_target_address); - out_reg_spiller.generate_fill(_masm, spill_offset); + out_reg_spiller.generate_fill(_masm, out_spill_offset); __ z_bru(L_after_reguard); diff --git a/src/hotspot/cpu/s390/frame_s390.hpp b/src/hotspot/cpu/s390/frame_s390.hpp index ad754706367..bcdeec43e1a 100644 --- a/src/hotspot/cpu/s390/frame_s390.hpp +++ b/src/hotspot/cpu/s390/frame_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -463,7 +463,7 @@ // Accessors - inline intptr_t* fp() const { return _fp; } + inline intptr_t* fp() const { assert_absolute(); return _fp; } private: diff --git a/src/hotspot/cpu/s390/frame_s390.inline.hpp b/src/hotspot/cpu/s390/frame_s390.inline.hpp index dea0e72581f..6fcd36c57d1 100644 --- a/src/hotspot/cpu/s390/frame_s390.inline.hpp +++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -133,10 +133,10 @@ inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) { // Return unique id for this frame. The id must have a value where we // can distinguish identity and younger/older relationship. null -// represents an invalid (incomparable) frame. +// represents an invalid (incomparable) frame. Should not be called for heap frames. inline intptr_t* frame::id(void) const { // Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing. - return _fp; + return real_fp(); } // Return true if this frame is older (less recent activation) than diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp index 7617c7a49e8..9fac231df47 100644 --- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp @@ -169,6 +169,11 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re __ z_lg(obj, 0, obj); // Resolve (untagged) jobject. } +void BarrierSetAssembler::try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) { + // Load the oop from the weak handle. 
+ __ z_lg(obj, Address(obj)); +} + void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) { BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); __ align(4, __ offset() + OFFSET_TO_PATCHABLE_DATA); // must align the following block which requires atomic updates @@ -206,11 +211,6 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na return opto_reg; } -void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) { - // Load the oop from the weak handle. - __ z_lg(obj, Address(obj)); -} - #undef __ #define __ _masm-> diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp index d5682450414..8e76ec2f4b4 100644 --- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp @@ -58,6 +58,11 @@ public: virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register obj, Register tmp, Label& slowpath); + // Can be used in nmethods including native wrappers. + // Attention: obj will only be valid until next safepoint (no SATB barrier). 
+ // (other platforms currently use it for C2 only: try_resolve_weak_handle_in_c2) + virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path); + virtual void nmethod_entry_barrier(MacroAssembler* masm); virtual void barrier_stubs_init() {} @@ -65,8 +70,6 @@ public: #ifdef COMPILER2 OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const; - virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, - Register tmp, Label& slow_path); #endif // COMPILER2 static const int OFFSET_TO_PATCHABLE_DATA_INSTRUCTION = 6 + 6 + 6; // iihf(6) + iilf(6) + lg(6) diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp index a80ca26239b..d5239898dd7 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -1259,27 +1259,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) { void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp, - Register reg2, - bool receiver_can_be_null) { + Register reg2) { if (ProfileInterpreter) { NearLabel profile_continue; // If no method data exists, go to profile_continue. test_method_data_pointer(mdp, profile_continue); - NearLabel skip_receiver_profile; - if (receiver_can_be_null) { - NearLabel not_null; - compareU64_and_branch(receiver, (intptr_t)0L, bcondNotEqual, not_null); - // We are making a call. Increment the count for null receiver. - increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); - z_bru(skip_receiver_profile); - bind(not_null); - } - // Record the receiver type. 
record_klass_in_profile(receiver, mdp, reg2); - bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size())); diff --git a/src/hotspot/cpu/s390/interp_masm_s390.hpp b/src/hotspot/cpu/s390/interp_masm_s390.hpp index d981f9ea01e..b816185b065 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.hpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -296,8 +296,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_call(Register mdp); void profile_final_call(Register mdp); void profile_virtual_call(Register receiver, Register mdp, - Register scratch2, - bool receiver_can_be_null = false); + Register scratch2); void profile_ret(Register return_bci, Register mdp); void profile_null_seen(Register mdp); void profile_typecheck(Register mdp, Register klass, Register scratch); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 78779a9098a..de3608e74ba 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. - * Copyright 2024 IBM Corporation. All rights reserved. + * Copyright 2024, 2026 IBM Corporation. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1237,7 +1237,6 @@ void MacroAssembler::load_narrow_oop(Register t, narrowOop a) { // Load narrow klass constant, compression required. void MacroAssembler::load_narrow_klass(Register t, Klass* k) { - assert(UseCompressedClassPointers, "must be on to call this method"); narrowKlass encoded_k = CompressedKlassPointers::encode(k); load_const_32to64(t, encoded_k, false /*sign_extend*/); } @@ -1255,7 +1254,6 @@ void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) // Compare narrow oop in reg with narrow oop constant, no decompression. void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) { - assert(UseCompressedClassPointers, "must be on to call this method"); narrowKlass encoded_k = CompressedKlassPointers::encode(klass2); Assembler::z_clfi(klass1, encoded_k); @@ -1348,8 +1346,6 @@ int MacroAssembler::patch_load_narrow_oop(address pos, oop o) { // Patching the immediate value of CPU version dependent load_narrow_klass sequence. // The passed ptr must NOT be in compressed format! int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) { - assert(UseCompressedClassPointers, "Can only patch compressed klass pointers"); - narrowKlass nk = CompressedKlassPointers::encode(k); return patch_load_const_32to64(pos, nk); } @@ -1364,8 +1360,6 @@ int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) { // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence. // The passed ptr must NOT be in compressed format! 
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) { - assert(UseCompressedClassPointers, "Can only patch compressed klass pointers"); - narrowKlass nk = CompressedKlassPointers::encode(k); return patch_compare_immediate_32(pos, nk); } @@ -2235,10 +2229,8 @@ int MacroAssembler::ic_check(int end_alignment) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(R1_scratch, R2_receiver); - } else if (UseCompressedClassPointers) { - z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes())); } else { - z_lg(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes())); + z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes())); } z_cg(R1_scratch, Address(R9_data, in_bytes(CompiledICData::speculated_klass_offset()))); z_bre(success); @@ -3916,7 +3908,6 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { address base = CompressedKlassPointers::base(); int shift = CompressedKlassPointers::shift(); bool need_zero_extend = base != nullptr; - assert(UseCompressedClassPointers, "only for compressed klass ptrs"); BLOCK_COMMENT("cKlass encoder {"); @@ -4013,7 +4004,6 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() { address base = CompressedKlassPointers::base(); int shift_size = CompressedKlassPointers::shift() == 0 ? 
0 : 6; /* sllg */ int addbase_size = 0; - assert(UseCompressedClassPointers, "only for compressed klass ptrs"); if (base != nullptr) { unsigned int base_h = ((unsigned long)base)>>32; @@ -4043,7 +4033,6 @@ void MacroAssembler::decode_klass_not_null(Register dst) { address base = CompressedKlassPointers::base(); int shift = CompressedKlassPointers::shift(); int beg_off = offset(); - assert(UseCompressedClassPointers, "only for compressed klass ptrs"); BLOCK_COMMENT("cKlass decoder (const size) {"); @@ -4085,7 +4074,6 @@ void MacroAssembler::decode_klass_not_null(Register dst) { void MacroAssembler::decode_klass_not_null(Register dst, Register src) { address base = CompressedKlassPointers::base(); int shift = CompressedKlassPointers::shift(); - assert(UseCompressedClassPointers, "only for compressed klass ptrs"); BLOCK_COMMENT("cKlass decoder {"); @@ -4125,13 +4113,9 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { } void MacroAssembler::load_klass(Register klass, Address mem) { - if (UseCompressedClassPointers) { - z_llgf(klass, mem); - // Attention: no null check here! - decode_klass_not_null(klass); - } else { - z_lg(klass, mem); - } + z_llgf(klass, mem); + // Attention: no null check here! + decode_klass_not_null(klass); } // Loads the obj's Klass* into dst. 
@@ -4154,10 +4138,8 @@ void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { assert_different_registers(klass, obj, tmp); load_narrow_klass_compact(tmp, obj); z_cr(klass, tmp); - } else if (UseCompressedClassPointers) { - z_c(klass, Address(obj, oopDesc::klass_offset_in_bytes())); } else { - z_cg(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + z_c(klass, Address(obj, oopDesc::klass_offset_in_bytes())); } BLOCK_COMMENT("} cmp_klass"); } @@ -4170,12 +4152,9 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi load_narrow_klass_compact(tmp1, obj1); load_narrow_klass_compact(tmp2, obj2); z_cr(tmp1, tmp2); - } else if (UseCompressedClassPointers) { + } else { z_l(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); z_c(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); - } else { - z_lg(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); - z_cg(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); } BLOCK_COMMENT("} cmp_klasses_from_objects"); } @@ -4184,36 +4163,28 @@ void MacroAssembler::load_klass(Register klass, Register src_oop) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(klass, src_oop); decode_klass_not_null(klass); - } else if (UseCompressedClassPointers) { + } else { z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop); decode_klass_not_null(klass); - } else { - z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); } } void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { assert(!UseCompactObjectHeaders, "Don't use with compact headers"); - if (UseCompressedClassPointers) { - assert_different_registers(dst_oop, klass, Z_R0); - if (ck == noreg) ck = klass; - encode_klass_not_null(ck, klass); - z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); - } else { - z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); - } + assert_different_registers(dst_oop, klass, Z_R0); + if (ck == noreg) ck = klass; + 
encode_klass_not_null(ck, klass); + z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); } void MacroAssembler::store_klass_gap(Register s, Register d) { assert(!UseCompactObjectHeaders, "Don't use with compact headers"); - if (UseCompressedClassPointers) { - assert(s != d, "not enough registers"); - // Support s = noreg. - if (s != noreg) { - z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); - } else { - z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0); - } + assert(s != d, "not enough registers"); + // Support s = noreg. + if (s != noreg) { + z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); + } else { + z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0); } } @@ -4227,67 +4198,64 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba BLOCK_COMMENT("compare klass ptr {"); - if (UseCompressedClassPointers) { - const int shift = CompressedKlassPointers::shift(); - address base = CompressedKlassPointers::base(); + const int shift = CompressedKlassPointers::shift(); + address base = CompressedKlassPointers::base(); - if (UseCompactObjectHeaders) { - assert(shift >= 3, "cKlass encoder detected bad shift"); - } else { - assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift"); - } - assert_different_registers(Rop1, Z_R0); - assert_different_registers(Rop1, Rbase, Z_R1); - - // First encode register oop and then compare with cOop in memory. - // This sequence saves an unnecessary cOop load and decode. - if (base == nullptr) { - if (shift == 0) { - z_cl(Rop1, disp, Rbase); // Unscaled - } else { - z_srlg(Z_R0, Rop1, shift); // ZeroBased - z_cl(Z_R0, disp, Rbase); - } - } else { // HeapBased -#ifdef ASSERT - bool used_R0 = true; - bool used_R1 = true; -#endif - Register current = Rop1; - Label done; - - if (maybenull) { // null pointer must be preserved! 
- z_ltgr(Z_R0, current); - z_bre(done); - current = Z_R0; - } - - unsigned int base_h = ((unsigned long)base)>>32; - unsigned int base_l = (unsigned int)((unsigned long)base); - if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { - lgr_if_needed(Z_R0, current); - z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. - } else if ((base_h == 0) && (base_l != 0)) { - lgr_if_needed(Z_R0, current); - z_agfi(Z_R0, -(int)base_l); - } else { - int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); - add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. - } - - if (shift != 0) { - z_srlg(Z_R0, Z_R0, shift); - } - bind(done); - z_cl(Z_R0, disp, Rbase); -#ifdef ASSERT - if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); - if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); -#endif - } + if (UseCompactObjectHeaders) { + assert(shift >= 3, "cKlass encoder detected bad shift"); } else { - z_clg(Rop1, disp, Z_R0, Rbase); + assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift"); } + assert_different_registers(Rop1, Z_R0); + assert_different_registers(Rop1, Rbase, Z_R1); + + // First encode register oop and then compare with cOop in memory. + // This sequence saves an unnecessary cOop load and decode. + if (base == nullptr) { + if (shift == 0) { + z_cl(Rop1, disp, Rbase); // Unscaled + } else { + z_srlg(Z_R0, Rop1, shift); // ZeroBased + z_cl(Z_R0, disp, Rbase); + } + } else { // HeapBased +#ifdef ASSERT + bool used_R0 = true; + bool used_R1 = true; +#endif + Register current = Rop1; + Label done; + + if (maybenull) { // null pointer must be preserved! 
+ z_ltgr(Z_R0, current); + z_bre(done); + current = Z_R0; + } + + unsigned int base_h = ((unsigned long)base)>>32; + unsigned int base_l = (unsigned int)((unsigned long)base); + if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { + lgr_if_needed(Z_R0, current); + z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. + } else if ((base_h == 0) && (base_l != 0)) { + lgr_if_needed(Z_R0, current); + z_agfi(Z_R0, -(int)base_l); + } else { + int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); + add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. + } + + if (shift != 0) { + z_srlg(Z_R0, Z_R0, shift); + } + bind(done); + z_cl(Z_R0, disp, Rbase); +#ifdef ASSERT + if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); + if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); +#endif + } + BLOCK_COMMENT("} compare klass ptr"); } @@ -6413,7 +6381,7 @@ void MacroAssembler::compiler_fast_lock_object(Register obj, Register box, Regis // Check if object matches. z_lg(tmp2, Address(tmp1_monitor, ObjectMonitor::object_offset())); BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); - bs_asm->try_resolve_weak_handle_in_c2(this, tmp2, Z_R0_scratch, slow_path); + bs_asm->try_resolve_weak_handle(this, tmp2, Z_R0_scratch, slow_path); z_cgr(obj, tmp2); z_brne(slow_path); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp index da24ae80d45..32e484d4790 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * Copyright (c) 2024 IBM Corporation. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -842,8 +842,7 @@ class MacroAssembler: public Assembler { void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided. void store_klass_gap(Register s, Register dst_oop); void load_narrow_klass_compact(Register dst, Register src); - // Compares the Klass pointer of an object to a given Klass (which might be narrow, - // depending on UseCompressedClassPointers). + // Compares the narrow Klass pointer of an object to a given narrow Klass void cmp_klass(Register klass, Register obj, Register tmp); // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags. // Uses tmp1 and tmp2 as temporary registers. diff --git a/src/hotspot/cpu/s390/matcher_s390.hpp b/src/hotspot/cpu/s390/matcher_s390.hpp index 99461e33e3c..b04a6566d41 100644 --- a/src/hotspot/cpu/s390/matcher_s390.hpp +++ b/src/hotspot/cpu/s390/matcher_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -82,7 +82,6 @@ static bool narrow_klass_use_complex_address() { NOT_LP64(ShouldNotCallThis()); - assert(UseCompressedClassPointers, "only for compressed klass code"); // TODO HS25: z port if (MatchDecodeNodes) return true; return false; } diff --git a/src/hotspot/cpu/s390/methodHandles_s390.cpp b/src/hotspot/cpu/s390/methodHandles_s390.cpp index e3de6d911be..dfb8ce09b27 100644 --- a/src/hotspot/cpu/s390/methodHandles_s390.cpp +++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp @@ -120,16 +120,12 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, __ z_nilf(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); __ compare32_and_branch(temp, constant(ref_kind), Assembler::bcondEqual, L); - { - char *buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); - - jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); - if (ref_kind == JVM_REF_invokeVirtual || ref_kind == JVM_REF_invokeSpecial) { - // Could do this for all ref_kinds, but would explode assembly code size. - trace_method_handle(_masm, buf); - } - __ stop(buf); + const char* msg = ref_kind_to_verify_msg(ref_kind); + if (ref_kind == JVM_REF_invokeVirtual || ref_kind == JVM_REF_invokeSpecial) { + // Could do this for all ref_kinds, but would explode assembly code size. + trace_method_handle(_masm, msg); } + __ stop(msg); BLOCK_COMMENT("} verify_ref_kind"); diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad index 19bd3620228..2208a197ac9 100644 --- a/src/hotspot/cpu/s390/s390.ad +++ b/src/hotspot/cpu/s390/s390.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2017, 2024 SAP SE. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// @@ -1890,10 +1890,8 @@ const int z_num_iarg_registers = sizeof(z_iarg_reg) / sizeof(z_iarg_reg[0]); const int z_num_farg_registers = sizeof(z_farg_reg) / sizeof(z_farg_reg[0]); -// Return whether or not this register is ever used as an argument. This -// function is used on startup to build the trampoline stubs in generateOptoStub. -// Registers not mentioned will be killed by the VM call in the trampoline, and -// arguments in those registers not be available to the callee. +#ifdef ASSERT +// Return whether or not this register is ever used as an argument. bool Matcher::can_be_java_arg(int reg) { // We return true for all registers contained in z_iarg_reg[] and // z_farg_reg[] and their virtual halves. @@ -1917,10 +1915,7 @@ bool Matcher::can_be_java_arg(int reg) { return false; } - -bool Matcher::is_spillable_arg(int reg) { - return can_be_java_arg(reg); -} +#endif uint Matcher::int_pressure_limit() { @@ -1934,10 +1929,6 @@ uint Matcher::float_pressure_limit() return (FLOATPRESSURE == -1) ? 15 : FLOATPRESSURE; } -bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { - return false; -} - // Register for DIVI projection of divmodI const RegMask& Matcher::divI_proj_mask() { return _Z_RARG4_INT_REG_mask; @@ -2606,13 +2597,6 @@ frame %{ // z/Architecture stack pointer frame_pointer(Z_R15); // Z_SP - // Interpreter stores its frame pointer in a register which is - // stored to the stack by I2CAdaptors. I2CAdaptors convert from - // interpreted java to compiled java. - // - // Z_state holds pointer to caller's cInterpreter. - interpreter_frame_pointer(Z_R7); // Z_state - // Use alignment_in_bytes instead of log_2_of_alignment_in_bits. 
stack_alignment(frame::alignment_in_bytes); @@ -5251,6 +5235,15 @@ instruct membar_release_lock() %{ ins_pipe(pipe_class_dummy); %} +instruct membar_storeload() %{ + match(MemBarStoreLoad); + ins_cost(4 * MEMORY_REF_COST); + size(2); + format %{ "MEMBAR-storeload" %} + ins_encode %{ __ z_fence(); %} + ins_pipe(pipe_class_dummy); +%} + instruct membar_volatile() %{ match(MemBarVolatile); ins_cost(4 * MEMORY_REF_COST); @@ -5270,6 +5263,15 @@ instruct unnecessary_membar_volatile() %{ ins_pipe(pipe_class_dummy); %} +instruct membar_full() %{ + match(MemBarFull); + ins_cost(4 * MEMORY_REF_COST); + size(2); + format %{ "MEMBAR-full" %} + ins_encode %{ __ z_fence(); %} + ins_pipe(pipe_class_dummy); +%} + instruct membar_CPUOrder() %{ match(MemBarCPUOrder); ins_cost(0); diff --git a/src/hotspot/cpu/s390/stubDeclarations_s390.hpp b/src/hotspot/cpu/s390/stubDeclarations_s390.hpp index c3ad3cefeb9..d0e26beedab 100644 --- a/src/hotspot/cpu/s390/stubDeclarations_s390.hpp +++ b/src/hotspot/cpu/s390/stubDeclarations_s390.hpp @@ -29,28 +29,32 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 0) \ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 20000) \ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 2000) \ #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 20000 ) \ do_stub(compiler, partial_subtype_check) \ do_arch_entry(zarch, compiler, partial_subtype_check, \ @@ -60,7 +64,8 @@ #define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ 
do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 20000) \ diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp index 2aa365be999..3f16312eb48 100644 --- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp @@ -3422,7 +3422,7 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -3479,6 +3479,6 @@ class StubGenerator: public StubCodeGenerator { }; -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) { + StubGenerator g(code, blob_id, stub_data); } diff --git a/src/hotspot/cpu/s390/stubRoutines_s390.cpp b/src/hotspot/cpu/s390/stubRoutines_s390.cpp index 6feb20f9604..eda0ebfdecc 100644 --- a/src/hotspot/cpu/s390/stubRoutines_s390.cpp +++ b/src/hotspot/cpu/s390/stubRoutines_s390.cpp @@ -40,8 +40,12 @@ #define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); -STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) +#define DEFINE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) [idx] ; +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY) + +#undef DEFINE_ARCH_ENTRY_ARRAY #undef DEFINE_ARCH_ENTRY_INIT #undef DEFINE_ARCH_ENTRY @@ -736,3 +740,9 @@ juint 
StubRoutines::zarch::_crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE] = { } #endif }; + +#if INCLUDE_CDS +// nothing to do for s390 +void StubRoutines::init_AOTAddressTable() { +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/s390/stubRoutines_s390.hpp b/src/hotspot/cpu/s390/stubRoutines_s390.hpp index 0a07efae46c..e575115b731 100644 --- a/src/hotspot/cpu/s390/stubRoutines_s390.hpp +++ b/src/hotspot/cpu/s390/stubRoutines_s390.hpp @@ -81,9 +81,13 @@ class zarch { #define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) -private: - STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) +#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address STUB_FIELD_NAME(field_name) [count] ; +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY) + +#undef DECLARE_ARCH_ENTRY_ARRAY #undef DECLARE_ARCH_ENTRY_INIT #undef DECLARE_ARCH_ENTRY @@ -108,8 +112,12 @@ private: #define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) +#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address getter_name(int idx) { return STUB_FIELD_NAME(field_name) [idx] ; } + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY) + +#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY #undef DEFINE_ARCH_ENTRY_GETTER_INIT #undef DEFINE_ARCH_ENTRY_GETTER diff --git a/src/hotspot/cpu/s390/vm_version_s390.cpp b/src/hotspot/cpu/s390/vm_version_s390.cpp index 7f5b4870aab..7e9000991ca 100644 --- a/src/hotspot/cpu/s390/vm_version_s390.cpp +++ 
b/src/hotspot/cpu/s390/vm_version_s390.cpp @@ -289,10 +289,6 @@ void VM_Version::initialize() { FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); } - if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { - FLAG_SET_DEFAULT(UseSHA, false); - } - if (UseSecondarySupersTable && VM_Version::get_model_index() < 5 /* z196/z11 */) { if (!FLAG_IS_DEFAULT(UseSecondarySupersTable)) { warning("UseSecondarySupersTable requires z196 or later."); diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp index 3c8defe62d9..a4f2968f0d1 100644 --- a/src/hotspot/cpu/x86/assembler_x86.cpp +++ b/src/hotspot/cpu/x86/assembler_x86.cpp @@ -3472,7 +3472,7 @@ void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { emit_int16(0x6F, (0xC0 | encode)); } -void Assembler::vmovw(XMMRegister dst, Register src) { +void Assembler::evmovw(XMMRegister dst, Register src) { assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_is_evex_instruction(); @@ -3480,7 +3480,7 @@ void Assembler::vmovw(XMMRegister dst, Register src) { emit_int16(0x6E, (0xC0 | encode)); } -void Assembler::vmovw(Register dst, XMMRegister src) { +void Assembler::evmovw(Register dst, XMMRegister src) { assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_is_evex_instruction(); @@ -3488,6 +3488,36 @@ void Assembler::vmovw(Register dst, XMMRegister src) { emit_int16(0x7E, (0xC0 | encode)); } +void Assembler::evmovw(XMMRegister dst, Address src) { + assert(VM_Version::supports_avx10_2(), ""); + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + 
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); + attributes.set_is_evex_instruction(); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); + emit_int8(0x6E); + emit_operand(dst, src, 0); +} + +void Assembler::evmovw(Address dst, XMMRegister src) { + assert(VM_Version::supports_avx10_2(), ""); + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); + attributes.set_is_evex_instruction(); + vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); + emit_int8(0x7E); + emit_operand(src, dst, 0); +} + +void Assembler::evmovw(XMMRegister dst, XMMRegister src) { + assert(VM_Version::supports_avx10_2(), ""); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_is_evex_instruction(); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); + emit_int16(0x6E, (0xC0 | encode)); +} + void Assembler::vmovdqu(XMMRegister dst, Address src) { assert(UseAVX > 0, ""); InstructionMark im(this); @@ -5442,6 +5472,13 @@ void Assembler::pmovsxwd(XMMRegister dst, XMMRegister src) { emit_int16(0x23, (0xC0 | encode)); } +void Assembler::pmovzxwd(XMMRegister dst, XMMRegister src) { + assert(VM_Version::supports_sse4_1(), ""); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); + int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + emit_int16(0x33, (0xC0 | encode)); +} + void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); @@ 
-7303,6 +7340,42 @@ void Assembler::etzcntq(Register dst, Address src, bool no_flags) { emit_operand(dst, src, 0); } +void Assembler::evucomish(XMMRegister dst, Address src) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); + attributes.set_is_evex_instruction(); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes); + emit_int8(0x2E); + emit_operand(dst, src, 0); +} + +void Assembler::evucomish(XMMRegister dst, XMMRegister src) { + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_is_evex_instruction(); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes); + emit_int16(0x2E, (0xC0 | encode)); +} + +void Assembler::evucomxsh(XMMRegister dst, Address src) { + assert(VM_Version::supports_avx10_2(), ""); + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); + attributes.set_is_evex_instruction(); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); + emit_int8(0x2E); + emit_operand(dst, src, 0); +} + +void Assembler::evucomxsh(XMMRegister dst, XMMRegister src) { + assert(VM_Version::supports_avx10_2(), ""); + InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_is_evex_instruction(); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); + emit_int16(0x2E, (0xC0 | encode)); +} + void 
Assembler::ucomisd(XMMRegister dst, Address src) { InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -7320,7 +7393,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { emit_int16(0x2E, (0xC0 | encode)); } -void Assembler::vucomxsd(XMMRegister dst, Address src) { +void Assembler::evucomxsd(XMMRegister dst, Address src) { assert(VM_Version::supports_avx10_2(), ""); InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -7331,7 +7404,7 @@ void Assembler::vucomxsd(XMMRegister dst, Address src) { emit_operand(dst, src, 0); } -void Assembler::vucomxsd(XMMRegister dst, XMMRegister src) { +void Assembler::evucomxsd(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_avx10_2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_is_evex_instruction(); @@ -7354,7 +7427,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { emit_int16(0x2E, (0xC0 | encode)); } -void Assembler::vucomxss(XMMRegister dst, Address src) { +void Assembler::evucomxss(XMMRegister dst, Address src) { assert(VM_Version::supports_avx10_2(), ""); InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -7365,7 +7438,7 @@ void Assembler::vucomxss(XMMRegister dst, Address src) { emit_operand(dst, src, 0); } -void Assembler::vucomxss(XMMRegister dst, XMMRegister src) { +void Assembler::evucomxss(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_avx10_2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_is_evex_instruction(); @@ -8404,30 
+8477,6 @@ void Assembler::vmulsh(XMMRegister dst, XMMRegister nds, XMMRegister src) { emit_int16(0x59, (0xC0 | encode)); } -void Assembler::vmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src) { - assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16"); - InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - attributes.set_is_evex_instruction(); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); - emit_int16(0x5F, (0xC0 | encode)); -} - -void Assembler::eminmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { - assert(VM_Version::supports_avx10_2(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - attributes.set_is_evex_instruction(); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes); - emit_int24(0x53, (0xC0 | encode), imm8); -} - -void Assembler::vminsh(XMMRegister dst, XMMRegister nds, XMMRegister src) { - assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16"); - InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - attributes.set_is_evex_instruction(); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes); - emit_int16(0x5D, (0xC0 | encode)); -} - void Assembler::vsqrtsh(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -13362,48 +13411,38 @@ bool Assembler::is_demotable(bool no_flags, int dst_enc, int nds_enc) { return (!no_flags && dst_enc == nds_enc); } 
-void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) { - assert(VM_Version::supports_avx(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); - emit_int16(0x5F, (0xC0 | encode)); -} - -void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { - assert(VM_Version::supports_avx(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - attributes.set_rex_vex_w_reverted(); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); - emit_int16(0x5F, (0xC0 | encode)); -} - -void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) { - assert(VM_Version::supports_avx(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); - emit_int16(0x5D, (0xC0 | encode)); -} - -void Assembler::eminmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { +void Assembler::evminmaxsh(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8) { assert(VM_Version::supports_avx10_2(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_is_evex_instruction(); + attributes.set_embedded_opmask_register_specifier(mask); + if (merge) { + attributes.reset_is_clear_context(); + } + 
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes); + emit_int24(0x53, (0xC0 | encode), imm8); +} + +void Assembler::evminmaxss(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8) { + assert(VM_Version::supports_avx10_2(), ""); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_is_evex_instruction(); + attributes.set_embedded_opmask_register_specifier(mask); + if (merge) { + attributes.reset_is_clear_context(); + } int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int24(0x53, (0xC0 | encode), imm8); } -void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { - assert(VM_Version::supports_avx(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); - attributes.set_rex_vex_w_reverted(); - int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); - emit_int16(0x5D, (0xC0 | encode)); -} - -void Assembler::eminmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { +void Assembler::evminmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8) { assert(VM_Version::supports_avx10_2(), ""); - InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_is_evex_instruction(); + attributes.set_embedded_opmask_register_specifier(mask); + if (merge) { + attributes.reset_is_clear_context(); + } int encode = 
vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int24(0x53, (0xC0 | encode), imm8); } diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp index 97854f712cf..98684752b0c 100644 --- a/src/hotspot/cpu/x86/assembler_x86.hpp +++ b/src/hotspot/cpu/x86/assembler_x86.hpp @@ -1694,8 +1694,11 @@ private: void movsbl(Register dst, Address src); void movsbl(Register dst, Register src); - void vmovw(XMMRegister dst, Register src); - void vmovw(Register dst, XMMRegister src); + void evmovw(XMMRegister dst, Register src); + void evmovw(Register dst, XMMRegister src); + void evmovw(XMMRegister dst, Address src); + void evmovw(Address dst, XMMRegister src); + void evmovw(XMMRegister dst, XMMRegister src); void movsbq(Register dst, Address src); void movsbq(Register dst, Register src); @@ -1965,6 +1968,7 @@ private: void pmovsxbq(XMMRegister dst, XMMRegister src); void pmovsxbw(XMMRegister dst, XMMRegister src); void pmovsxwd(XMMRegister dst, XMMRegister src); + void pmovzxwd(XMMRegister dst, XMMRegister src); void vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len); void vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len); void vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len); @@ -2328,17 +2332,23 @@ private: void tzcntq(Register dst, Address src); void etzcntq(Register dst, Address src, bool no_flags); + // Unordered Compare Scalar Half-Precision Floating-Point Values and set EFLAGS + void evucomish(XMMRegister dst, Address src); + void evucomish(XMMRegister dst, XMMRegister src); + void evucomxsh(XMMRegister dst, Address src); + void evucomxsh(XMMRegister dst, XMMRegister src); + // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS void ucomisd(XMMRegister dst, Address src); void ucomisd(XMMRegister dst, XMMRegister src); - void vucomxsd(XMMRegister dst, Address src); - void vucomxsd(XMMRegister dst, XMMRegister 
src); + void evucomxsd(XMMRegister dst, Address src); + void evucomxsd(XMMRegister dst, XMMRegister src); // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS void ucomiss(XMMRegister dst, Address src); void ucomiss(XMMRegister dst, XMMRegister src); - void vucomxss(XMMRegister dst, Address src); - void vucomxss(XMMRegister dst, XMMRegister src); + void evucomxss(XMMRegister dst, Address src); + void evucomxss(XMMRegister dst, XMMRegister src); void xabort(int8_t imm8); @@ -2416,11 +2426,6 @@ private: void vsubss(XMMRegister dst, XMMRegister nds, Address src); void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src); - void vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src); - void vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src); - void vminss(XMMRegister dst, XMMRegister nds, XMMRegister src); - void vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src); - void sarxl(Register dst, Register src1, Register src2); void sarxl(Register dst, Address src1, Register src2); void sarxq(Register dst, Register src1, Register src2); @@ -2551,8 +2556,6 @@ private: void vsubsh(XMMRegister dst, XMMRegister nds, XMMRegister src); void vmulsh(XMMRegister dst, XMMRegister nds, XMMRegister src); void vdivsh(XMMRegister dst, XMMRegister nds, XMMRegister src); - void vmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src); - void vminsh(XMMRegister dst, XMMRegister nds, XMMRegister src); void vsqrtsh(XMMRegister dst, XMMRegister src); void vfmadd132sh(XMMRegister dst, XMMRegister src1, XMMRegister src2); @@ -2789,9 +2792,9 @@ private: void vminpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len); // AVX10.2 floating point minmax instructions - void eminmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8); - void eminmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8); - void eminmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8); + void evminmaxsh(XMMRegister dst, 
KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8); + void evminmaxss(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8); + void evminmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8); void evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len); void evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len); void evminmaxps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index d9be0fdcc8d..5c05b3702bb 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,6 +42,7 @@ #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/threadIdentifier.hpp" #include "utilities/powerOfTwo.hpp" #include "vmreg_x86.inline.hpp" @@ -70,6 +71,17 @@ static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000)); static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000)); +#if INCLUDE_CDS +// publish external addresses defined in this file +void LIR_Assembler::init_AOTAddressTable(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(float_signmask_pool); + ADD(double_signmask_pool); + ADD(float_signflip_pool); + ADD(double_signflip_pool); +#undef ADD +} +#endif // INCLUDE_CDS NEEDS_CLEANUP // remove this definitions ? const Register SYNC_header = rax; // synchronization header @@ -77,23 +89,6 @@ const Register SHIFT_count = rcx; // where count for shift operations must be #define __ _masm-> - -static void select_different_registers(Register preserve, - Register extra, - Register &tmp1, - Register &tmp2) { - if (tmp1 == preserve) { - assert_different_registers(tmp1, tmp2, extra); - tmp1 = extra; - } else if (tmp2 == preserve) { - assert_different_registers(tmp1, tmp2, extra); - tmp2 = extra; - } - assert_different_registers(preserve, tmp1, tmp2); -} - - - static void select_different_registers(Register preserve, Register extra, Register &tmp1, @@ -535,10 +530,23 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod } case T_LONG: { +#if INCLUDE_CDS + if (AOTCodeCache::is_on_for_dump()) { + address b = c->as_pointer(); + if (b == (address)ThreadIdentifier::unsafe_offset()) { + __ lea(dest->as_register_lo(), ExternalAddress(b)); + break; + } + } +#endif assert(patch_code == lir_patch_none, "no patching handled here"); #if INCLUDE_CDS if (AOTCodeCache::is_on_for_dump()) { address b = c->as_pointer(); + if (b == (address)ThreadIdentifier::unsafe_offset()) { + __ lea(dest->as_register_lo(), ExternalAddress(b)); + break; + } if (AOTRuntimeConstants::contains(b)) { __ load_aotrc_address(dest->as_register_lo(), b); break; @@ -1309,12 +1317,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L } else if (obj == klass_RInfo) { klass_RInfo = dst; } - if (k->is_loaded() && !UseCompressedClassPointers) { - select_different_registers(obj, dst, k_RInfo, klass_RInfo); - } else { - Rtmp1 = op->tmp3()->as_register(); - select_different_registers(obj, dst, 
k_RInfo, klass_RInfo, Rtmp1); - } + Rtmp1 = op->tmp3()->as_register(); + select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); assert_different_registers(obj, k_RInfo, klass_RInfo); @@ -1348,12 +1352,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L if (op->fast_check()) { // get object class // not a safepoint as obj null check happens earlier - if (UseCompressedClassPointers) { - __ load_klass(Rtmp1, obj, tmp_load_klass); - __ cmpptr(k_RInfo, Rtmp1); - } else { - __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); - } + __ load_klass(Rtmp1, obj, tmp_load_klass); + __ cmpptr(k_RInfo, Rtmp1); __ jcc(Assembler::notEqual, *failure_target); // successful cast, fall through to profile or jump } else { @@ -2651,9 +2651,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // but not necessarily exactly of type default_type. Label known_ok, halt; __ mov_metadata(tmp, default_type->constant_encoding()); - if (UseCompressedClassPointers) { - __ encode_klass_not_null(tmp, rscratch1); - } + __ encode_klass_not_null(tmp, rscratch1); if (basic_type != T_OBJECT) { __ cmp_klass(tmp, dst, tmp2); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp index c4a368b54d8..6f179255e4a 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp @@ -58,4 +58,7 @@ public: void store_parameter(jobject c, int offset_from_esp_in_words); void store_parameter(Metadata* c, int offset_from_esp_in_words); +#if INCLUDE_CDS + void static init_AOTAddressTable(GrowableArray
& external_addresses); +#endif // INCLUDE_CDS #endif // CPU_X86_C1_LIRASSEMBLER_X86_HPP diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp index 5459e8df229..f448e4ee17f 100644 --- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1291,9 +1291,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedClassPointers) { - tmp3 = new_register(objectType); - } + tmp3 = new_register(objectType); __ checkcast(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), info_for_exception, patching_info, stub, @@ -1313,9 +1311,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { } obj.load_item(); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedClassPointers) { - tmp3 = new_register(objectType); - } + tmp3 = new_register(objectType); __ instanceof(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 88e2e6c8ba9..7adaea48ff1 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -85,14 +85,11 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register if (UseCompactObjectHeaders) { movptr(t1, Address(klass, Klass::prototype_header_offset())); movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); - } else if (UseCompressedClassPointers) { // Take care not to kill klass + } else { // Take care not to kill klass movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast(markWord::prototype().value())); movptr(t1, klass); encode_klass_not_null(t1, rscratch1); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); - } else { - movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast(markWord::prototype().value())); - movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); } if (len->is_valid()) { @@ -104,7 +101,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register xorl(t1, t1); movl(Address(obj, base_offset), t1); } - } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { + } else if (!UseCompactObjectHeaders) { xorptr(t1, t1); store_klass_gap(obj, t1); } diff --git a/src/hotspot/cpu/x86/c1_globals_x86.hpp b/src/hotspot/cpu/x86/c1_globals_x86.hpp index 978b233bb63..bb75a31a77c 100644 --- a/src/hotspot/cpu/x86/c1_globals_x86.hpp +++ b/src/hotspot/cpu/x86/c1_globals_x86.hpp @@ -41,7 +41,6 @@ define_pd_global(bool, TieredCompilation, false); define_pd_global(intx, CompileThreshold, 1500 ); define_pd_global(intx, OnStackReplacePercentage, 933 ); -define_pd_global(size_t, NewSizeThreadIncrease, 4*K ); define_pd_global(size_t, InitialCodeCacheSize, 160*K); define_pd_global(size_t, ReservedCodeCacheSize, 32*M ); define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M ); @@ -51,7 +50,6 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(size_t, CodeCacheExpansionSize, 32*K ); 
define_pd_global(size_t, CodeCacheMinBlockLength, 1 ); define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(bool, CICompileOSR, true ); #endif // !COMPILER2 define_pd_global(bool, UseTypeProfile, false); diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index a3ccc081b6b..b4d8aa10de2 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -152,7 +152,7 @@ inline Assembler::AvxVectorLen C2_MacroAssembler::vector_length_encoding(int vle // Because the transitions from emitted code to the runtime // monitorenter/exit helper stubs are so slow it's critical that -// we inline both the stack-locking fast path and the inflated fast path. +// we inline both the lock-stack fast path and the inflated fast path. // // See also: cmpFastLock and cmpFastUnlock. // @@ -1037,8 +1037,8 @@ void C2_MacroAssembler::evminmax_fp(int opcode, BasicType elem_bt, } } -void C2_MacroAssembler::vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask, - XMMRegister src1, XMMRegister src2, int vlen_enc) { +void C2_MacroAssembler::vminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask, + XMMRegister src1, XMMRegister src2, int vlen_enc) { assert(opc == Op_MinV || opc == Op_MinReductionV || opc == Op_MaxV || opc == Op_MaxReductionV, "sanity"); @@ -1052,6 +1052,21 @@ void C2_MacroAssembler::vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst, } } +void C2_MacroAssembler::sminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask, + XMMRegister src1, XMMRegister src2) { + assert(opc == Op_MinF || opc == Op_MaxF || + opc == Op_MinD || opc == Op_MaxD, "sanity"); + + int imm8 = (opc == Op_MinF || opc == Op_MinD) ? 
AVX10_2_MINMAX_MIN_COMPARE_SIGN + : AVX10_2_MINMAX_MAX_COMPARE_SIGN; + if (elem_bt == T_FLOAT) { + evminmaxss(dst, mask, src1, src2, true, imm8); + } else { + assert(elem_bt == T_DOUBLE, ""); + evminmaxsd(dst, mask, src1, src2, true, imm8); + } +} + // Float/Double signum void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero, XMMRegister one) { assert(opcode == Op_SignumF || opcode == Op_SignumD, "sanity"); @@ -1063,7 +1078,7 @@ void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero, // If other floating point comparison instructions used, ZF=1 for equal and unordered cases if (opcode == Op_SignumF) { if (VM_Version::supports_avx10_2()) { - vucomxss(dst, zero); + evucomxss(dst, zero); jcc(Assembler::negative, DONE_LABEL); } else { ucomiss(dst, zero); @@ -1074,7 +1089,7 @@ void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero, xorps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), noreg); } else if (opcode == Op_SignumD) { if (VM_Version::supports_avx10_2()) { - vucomxsd(dst, zero); + evucomxsd(dst, zero); jcc(Assembler::negative, DONE_LABEL); } else { ucomisd(dst, zero); @@ -1691,12 +1706,8 @@ void C2_MacroAssembler::load_constant_vector(BasicType bt, XMMRegister dst, Inte } void C2_MacroAssembler::load_iota_indices(XMMRegister dst, int vlen_in_bytes, BasicType bt) { - // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64. 
- int offset = exact_log2(type2aelembytes(bt)) << 6; - if (is_floating_point_type(bt)) { - offset += 128; - } - ExternalAddress addr(StubRoutines::x86::vector_iota_indices() + offset); + int entry_idx = vector_iota_entry_index(bt); + ExternalAddress addr(StubRoutines::x86::vector_iota_indices(entry_idx)); load_vector(T_BYTE, dst, addr, vlen_in_bytes); } @@ -1729,6 +1740,24 @@ void C2_MacroAssembler::reduce_operation_128(BasicType typ, int opcode, XMMRegis default: assert(false, "wrong type"); } break; + case Op_UMinReductionV: + switch (typ) { + case T_BYTE: vpminub(dst, dst, src, Assembler::AVX_128bit); break; + case T_SHORT: vpminuw(dst, dst, src, Assembler::AVX_128bit); break; + case T_INT: vpminud(dst, dst, src, Assembler::AVX_128bit); break; + case T_LONG: evpminuq(dst, k0, dst, src, true, Assembler::AVX_128bit); break; + default: assert(false, "wrong type"); + } + break; + case Op_UMaxReductionV: + switch (typ) { + case T_BYTE: vpmaxub(dst, dst, src, Assembler::AVX_128bit); break; + case T_SHORT: vpmaxuw(dst, dst, src, Assembler::AVX_128bit); break; + case T_INT: vpmaxud(dst, dst, src, Assembler::AVX_128bit); break; + case T_LONG: evpmaxuq(dst, k0, dst, src, true, Assembler::AVX_128bit); break; + default: assert(false, "wrong type"); + } + break; case Op_AddReductionVF: addss(dst, src); break; case Op_AddReductionVD: addsd(dst, src); break; case Op_AddReductionVI: @@ -1792,6 +1821,24 @@ void C2_MacroAssembler::reduce_operation_256(BasicType typ, int opcode, XMMRegis default: assert(false, "wrong type"); } break; + case Op_UMinReductionV: + switch (typ) { + case T_BYTE: vpminub(dst, src1, src2, vector_len); break; + case T_SHORT: vpminuw(dst, src1, src2, vector_len); break; + case T_INT: vpminud(dst, src1, src2, vector_len); break; + case T_LONG: evpminuq(dst, k0, src1, src2, true, vector_len); break; + default: assert(false, "wrong type"); + } + break; + case Op_UMaxReductionV: + switch (typ) { + case T_BYTE: vpmaxub(dst, src1, src2, vector_len); break; + 
case T_SHORT: vpmaxuw(dst, src1, src2, vector_len); break; + case T_INT: vpmaxud(dst, src1, src2, vector_len); break; + case T_LONG: evpmaxuq(dst, k0, src1, src2, true, vector_len); break; + default: assert(false, "wrong type"); + } + break; case Op_AddReductionVI: switch (typ) { case T_BYTE: vpaddb(dst, src1, src2, vector_len); break; @@ -2058,7 +2105,11 @@ void C2_MacroAssembler::reduce8B(int opcode, Register dst, Register src1, XMMReg psrldq(vtmp2, 1); reduce_operation_128(T_BYTE, opcode, vtmp1, vtmp2); movdl(vtmp2, src1); - pmovsxbd(vtmp1, vtmp1); + if (opcode == Op_UMinReductionV || opcode == Op_UMaxReductionV) { + pmovzxbd(vtmp1, vtmp1); + } else { + pmovsxbd(vtmp1, vtmp1); + } reduce_operation_128(T_INT, opcode, vtmp1, vtmp2); pextrb(dst, vtmp1, 0x0); movsbl(dst, dst); @@ -2095,8 +2146,8 @@ void C2_MacroAssembler::mulreduce16B(int opcode, Register dst, Register src1, XM } else { pmovsxbw(vtmp2, src2); reduce8S(opcode, dst, src1, vtmp2, vtmp1, vtmp2); - pshufd(vtmp2, src2, 0x1); - pmovsxbw(vtmp2, src2); + pshufd(vtmp2, src2, 0xe); + pmovsxbw(vtmp2, vtmp2); reduce8S(opcode, dst, dst, vtmp2, vtmp1, vtmp2); } } @@ -2105,7 +2156,7 @@ void C2_MacroAssembler::mulreduce32B(int opcode, Register dst, Register src1, XM if (UseAVX > 2 && VM_Version::supports_avx512bw()) { int vector_len = Assembler::AVX_512bit; vpmovsxbw(vtmp1, src2, vector_len); - reduce32S(opcode, dst, src1, vtmp1, vtmp1, vtmp2); + reduce32S(opcode, dst, src1, vtmp1, vtmp2, vtmp1); } else { assert(UseAVX >= 2,"Should not reach here."); mulreduce16B(opcode, dst, src1, src2, vtmp1, vtmp2); @@ -2135,7 +2186,11 @@ void C2_MacroAssembler::reduce4S(int opcode, Register dst, Register src1, XMMReg reduce_operation_128(T_SHORT, opcode, vtmp1, vtmp2); } movdl(vtmp2, src1); - pmovsxwd(vtmp1, vtmp1); + if (opcode == Op_UMinReductionV || opcode == Op_UMaxReductionV) { + pmovzxwd(vtmp1, vtmp1); + } else { + pmovsxwd(vtmp1, vtmp1); + } reduce_operation_128(T_INT, opcode, vtmp1, vtmp2); pextrw(dst, vtmp1, 0x0); 
movswl(dst, dst); @@ -2148,6 +2203,7 @@ void C2_MacroAssembler::reduce8S(int opcode, Register dst, Register src1, XMMReg } phaddw(vtmp1, src2); } else { + assert_different_registers(src2, vtmp1); pshufd(vtmp1, src2, 0xE); reduce_operation_128(T_SHORT, opcode, vtmp1, src2); } @@ -2160,6 +2216,7 @@ void C2_MacroAssembler::reduce16S(int opcode, Register dst, Register src1, XMMRe vphaddw(vtmp2, src2, src2, vector_len); vpermq(vtmp2, vtmp2, 0xD8, vector_len); } else { + assert_different_registers(src2, vtmp2); vextracti128_high(vtmp2, src2); reduce_operation_128(T_SHORT, opcode, vtmp2, src2); } @@ -2167,6 +2224,7 @@ void C2_MacroAssembler::reduce16S(int opcode, Register dst, Register src1, XMMRe } void C2_MacroAssembler::reduce32S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2) { + assert_different_registers(src2, vtmp1); int vector_len = Assembler::AVX_256bit; vextracti64x4_high(vtmp1, src2); reduce_operation_256(T_SHORT, opcode, vtmp1, vtmp1, src2); @@ -2356,7 +2414,7 @@ void C2_MacroAssembler::reduceFloatMinMax(int opcode, int vlen, bool is_dst_vali } if (VM_Version::supports_avx10_2()) { - vminmax_fp(opcode, T_FLOAT, wdst, k0, wtmp, wsrc, vlen_enc); + vminmax_fp_avx10_2(opcode, T_FLOAT, wdst, k0, wtmp, wsrc, vlen_enc); } else { vminmax_fp(opcode, T_FLOAT, wdst, wtmp, wsrc, tmp, atmp, btmp, vlen_enc); } @@ -2365,7 +2423,7 @@ void C2_MacroAssembler::reduceFloatMinMax(int opcode, int vlen, bool is_dst_vali } if (is_dst_valid) { if (VM_Version::supports_avx10_2()) { - vminmax_fp(opcode, T_FLOAT, dst, k0, wdst, dst, Assembler::AVX_128bit); + vminmax_fp_avx10_2(opcode, T_FLOAT, dst, k0, wdst, dst, Assembler::AVX_128bit); } else { vminmax_fp(opcode, T_FLOAT, dst, wdst, dst, tmp, atmp, btmp, Assembler::AVX_128bit); } @@ -2396,7 +2454,7 @@ void C2_MacroAssembler::reduceDoubleMinMax(int opcode, int vlen, bool is_dst_val } if (VM_Version::supports_avx10_2()) { - vminmax_fp(opcode, T_DOUBLE, wdst, k0, wtmp, wsrc, vlen_enc); + 
vminmax_fp_avx10_2(opcode, T_DOUBLE, wdst, k0, wtmp, wsrc, vlen_enc); } else { vminmax_fp(opcode, T_DOUBLE, wdst, wtmp, wsrc, tmp, atmp, btmp, vlen_enc); } @@ -2407,7 +2465,7 @@ void C2_MacroAssembler::reduceDoubleMinMax(int opcode, int vlen, bool is_dst_val if (is_dst_valid) { if (VM_Version::supports_avx10_2()) { - vminmax_fp(opcode, T_DOUBLE, dst, k0, wdst, dst, Assembler::AVX_128bit); + vminmax_fp_avx10_2(opcode, T_DOUBLE, dst, k0, wdst, dst, Assembler::AVX_128bit); } else { vminmax_fp(opcode, T_DOUBLE, dst, wdst, dst, tmp, atmp, btmp, Assembler::AVX_128bit); } @@ -7017,13 +7075,25 @@ void C2_MacroAssembler::evfp16ph(int opcode, XMMRegister dst, XMMRegister src1, } } -void C2_MacroAssembler::scalar_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, - KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2) { - vector_max_min_fp16(opcode, dst, src1, src2, ktmp, xtmp1, xtmp2, Assembler::AVX_128bit); +void C2_MacroAssembler::sminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2) { + vminmax_fp16(opcode, dst, src1, src2, ktmp, xtmp1, xtmp2, Assembler::AVX_128bit); } -void C2_MacroAssembler::vector_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, - KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc) { +void C2_MacroAssembler::sminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp) { + if (opcode == Op_MaxHF) { + // dst = max(src1, src2) + evminmaxsh(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MAX_COMPARE_SIGN); + } else { + assert(opcode == Op_MinHF, ""); + // dst = min(src1, src2) + evminmaxsh(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MIN_COMPARE_SIGN); + } +} + +void C2_MacroAssembler::vminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc) { if (opcode == Op_MaxVHF || opcode == 
Op_MaxHF) { // Move sign bits of src2 to mask register. evpmovw2m(ktmp, src2, vlen_enc); @@ -7066,3 +7136,48 @@ void C2_MacroAssembler::vector_max_min_fp16(int opcode, XMMRegister dst, XMMRegi Assembler::evmovdquw(dst, ktmp, xtmp1, true, vlen_enc); } } + +void C2_MacroAssembler::vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp, int vlen_enc) { + if (opcode == Op_MaxVHF) { + // dst = max(src1, src2) + evminmaxph(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vlen_enc); + } else { + assert(opcode == Op_MinVHF, ""); + // dst = min(src1, src2) + evminmaxph(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vlen_enc); + } +} + +void C2_MacroAssembler::vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, Address src2, + KRegister ktmp, int vlen_enc) { + if (opcode == Op_MaxVHF) { + // dst = max(src1, src2) + evminmaxph(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vlen_enc); + } else { + assert(opcode == Op_MinVHF, ""); + // dst = min(src1, src2) + evminmaxph(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vlen_enc); + } +} + +int C2_MacroAssembler::vector_iota_entry_index(BasicType bt) { + // The vector iota entries array is ordered by type B/S/I/L/F/D, and + // the offset between two types is 16. + switch(bt) { + case T_BYTE: + return 0; + case T_SHORT: + return 1; + case T_INT: + return 2; + case T_LONG: + return 3; + case T_FLOAT: + return 4; + case T_DOUBLE: + return 5; + default: + ShouldNotReachHere(); + } +} diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp index 6d8b0ceaebe..9b229ad7221 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,8 +67,11 @@ public: XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, int vlen_enc); - void vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask, - XMMRegister src1, XMMRegister src2, int vlen_enc); + void vminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask, + XMMRegister src1, XMMRegister src2, int vlen_enc); + + void sminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask, + XMMRegister src1, XMMRegister src2); void vpuminmaxq(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc); @@ -576,12 +579,22 @@ public: void evfp16ph(int opcode, XMMRegister dst, XMMRegister src1, Address src2, int vlen_enc); - void vector_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, - KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc); + void vminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc); - void scalar_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, - KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2); + void vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp, int vlen_enc); + + void vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, Address src2, + KRegister ktmp, int vlen_enc); + + void sminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2); + + void sminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, + KRegister ktmp); void reconstruct_frame_pointer(Register rtmp); + int vector_iota_entry_index(BasicType bt); #endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP 
diff --git a/src/hotspot/cpu/x86/c2_globals_x86.hpp b/src/hotspot/cpu/x86/c2_globals_x86.hpp index 3f616cb4578..11d8c03d0ca 100644 --- a/src/hotspot/cpu/x86/c2_globals_x86.hpp +++ b/src/hotspot/cpu/x86/c2_globals_x86.hpp @@ -46,7 +46,6 @@ define_pd_global(intx, FreqInlineSize, 325); define_pd_global(intx, MinJumpTableSize, 10); define_pd_global(intx, LoopPercentProfileLimit, 10); define_pd_global(intx, InteriorEntryAlignment, 16); -define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K)); define_pd_global(intx, LoopUnrollLimit, 60); // InitialCodeCacheSize derived from specjbb2000 run. define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize @@ -74,7 +73,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K); define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86. -// Ergonomics related flags -define_pd_global(bool, NeverActAsServerClassMachine, false); - #endif // CPU_X86_C2_GLOBALS_X86_HPP diff --git a/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp b/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp index c48940198ea..e3bf5f17fe9 100644 --- a/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp +++ b/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -145,10 +145,10 @@ void DowncallLinker::StubGenerator::generate() { // when we don't use a return buffer we need to spill the return value around our slow path calls bool should_save_return_value = !_needs_return_buffer; RegSpiller out_reg_spiller(_output_registers); - int spill_rsp_offset = -1; + int out_spill_rsp_offset = -1; if (should_save_return_value) { - spill_rsp_offset = 0; + out_spill_rsp_offset = 0; // spill area can be shared with shadow space and out args, // since they are only used before the call, // and spill area is only used after. @@ -173,6 +173,9 @@ void DowncallLinker::StubGenerator::generate() { // FP-> | | // |---------------------| = frame_bottom_offset = frame_size // | (optional) | + // | in_reg_spiller area | + // |---------------------| + // | (optional) | // | capture state buf | // |---------------------| = StubLocations::CAPTURED_STATE_BUFFER // | (optional) | @@ -188,6 +191,18 @@ void DowncallLinker::StubGenerator::generate() { VMStorage shuffle_reg = as_VMStorage(rbx); ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, shuffle_reg); + // Need to spill for state capturing runtime call. + // The area spilled into is distinct from the capture state buffer. + RegSpiller in_reg_spiller(out_regs); + int in_spill_rsp_offset = -1; + if (_captured_state_mask != 0) { + // The spill area cannot be shared with the shadow/out args space + // since spilling needs to happen before the call. Allocate a new + // region in the stack for this spill space. 
+ in_spill_rsp_offset = allocated_frame_size; + allocated_frame_size += in_reg_spiller.spill_size_bytes(); + } + #ifndef PRODUCT LogTarget(Trace, foreign, downcall) lt; if (lt.is_enabled()) { @@ -232,6 +247,19 @@ void DowncallLinker::StubGenerator::generate() { arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes); __ block_comment("} argument shuffle"); + if (_captured_state_mask != 0) { + assert(in_spill_rsp_offset != -1, "must be"); + __ block_comment("{ load initial thread local"); + in_reg_spiller.generate_spill(_masm, in_spill_rsp_offset); + + // Copy the contents of the capture state buffer into thread local + __ movptr(c_rarg0, Address(rsp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); + __ movl(c_rarg1, _captured_state_mask); + runtime_call(_masm, CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_pre)); + + in_reg_spiller.generate_fill(_masm, in_spill_rsp_offset); + __ block_comment("} load initial thread local"); + } __ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS))); assert(!_abi.is_volatile_reg(r15_thread), "Call assumed not to kill r15"); @@ -258,15 +286,15 @@ void DowncallLinker::StubGenerator::generate() { __ block_comment("{ save thread local"); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_rsp_offset); + out_reg_spiller.generate_spill(_masm, out_spill_rsp_offset); } __ movptr(c_rarg0, Address(rsp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER))); __ movl(c_rarg1, _captured_state_mask); - runtime_call(_masm, CAST_FROM_FN_PTR(address, DowncallLinker::capture_state)); + runtime_call(_masm, CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_post)); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_rsp_offset); + out_reg_spiller.generate_fill(_masm, out_spill_rsp_offset); } __ block_comment("} save thread local"); @@ -319,14 +347,14 @@ void DowncallLinker::StubGenerator::generate() { __ bind(L_safepoint_poll_slow_path); if 
(should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_rsp_offset); + out_reg_spiller.generate_spill(_masm, out_spill_rsp_offset); } __ mov(c_rarg0, r15_thread); runtime_call(_masm, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_rsp_offset); + out_reg_spiller.generate_fill(_masm, out_spill_rsp_offset); } __ jmp(L_after_safepoint_poll); @@ -338,13 +366,13 @@ void DowncallLinker::StubGenerator::generate() { __ bind(L_reguard); if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_rsp_offset); + out_reg_spiller.generate_spill(_masm, out_spill_rsp_offset); } runtime_call(_masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_rsp_offset); + out_reg_spiller.generate_fill(_masm, out_spill_rsp_offset); } __ jmp(L_after_reguard); diff --git a/src/hotspot/cpu/x86/frame_x86.inline.hpp b/src/hotspot/cpu/x86/frame_x86.inline.hpp index dcd766545d3..3f3b951edc8 100644 --- a/src/hotspot/cpu/x86/frame_x86.inline.hpp +++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -231,8 +231,8 @@ inline bool frame::equal(frame other) const { // Return unique id for this frame. The id must have a value where we can distinguish // identity and younger/older relationship. null represents an invalid (incomparable) -// frame. -inline intptr_t* frame::id(void) const { return unextended_sp(); } +// frame. Should not be called for heap frames. 
+inline intptr_t* frame::id(void) const { return real_fp(); } // Return true if the frame is older (less recent activation) than the frame represented by id inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id"); @@ -397,6 +397,9 @@ inline frame frame::sender(RegisterMap* map) const { StackWatermarkSet::on_iteration(map->thread(), result); } + // Calling frame::id() is currently not supported for heap frames. + assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be"); + return result; } diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp index 0ea769dd488..c05f37a3bea 100644 --- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,7 +130,7 @@ __ BIND(L_loop); __ BIND(L_done); } -void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst, Register rscratch) { +void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register rscratch) { // Does a store check for the oop in register obj. The content of // register obj is destroyed afterwards. 
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set(); @@ -138,6 +138,8 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob __ shrptr(obj, CardTable::card_shift()); Address card_addr; + precond(rscratch != noreg); + assert_different_registers(obj, rscratch); // The calculation for byte_map_base is as follows: // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); @@ -161,7 +163,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob // entry and that entry is not properly handled by the relocation code. AddressLiteral cardtable((address)byte_map_base, relocInfo::none); Address index(noreg, obj, Address::times_1); - card_addr = __ as_Address(ArrayAddress(cardtable, index), rscratch1); + card_addr = __ as_Address(ArrayAddress(cardtable, index), rscratch); } int dirty = CardTable::dirty_card_val(); @@ -190,10 +192,10 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS if (needs_post_barrier) { // flatten object address if needed if (!precise || (dst.index() == noreg && dst.disp() == 0)) { - store_check(masm, dst.base(), dst, tmp2); + store_check(masm, dst.base(), tmp2); } else { __ lea(tmp1, dst); - store_check(masm, tmp1, dst, tmp2); + store_check(masm, tmp1, tmp2); } } } diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp index 201c11062f2..c38e16d4d5f 100644 --- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ protected: virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {} - void store_check(MacroAssembler* masm, Register obj, Address dst, Register rscratch); + void store_check(MacroAssembler* masm, Register obj, Register rscratch); virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp); diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp index 47a3dad54e7..c20551b5084 100644 --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp @@ -31,6 +31,7 @@ #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "gc/z/zThreadLocalData.hpp" +#include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "runtime/jniHandles.hpp" #include "runtime/sharedRuntime.hpp" @@ -1391,10 +1392,13 @@ static uint16_t patch_barrier_relocation_value(int format) { } } -void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) { +void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format, bool log) { const int offset = patch_barrier_relocation_offset(format); const uint16_t value = patch_barrier_relocation_value(format); uint8_t* const patch_addr = (uint8_t*)addr + offset; + if (log) { + log_trace(aot, codecache, stubs)("patching address " INTPTR_FORMAT " offset %d value 0x%x", p2i(addr), offset, value); + } if (format == ZBarrierRelocationFormatLoadGoodBeforeShl) { if (VM_Version::supports_apx_f()) { NativeInstruction* instruction = nativeInstruction_at(addr); @@ -1426,6 +1430,74 @@ void ZBarrierSetAssembler::patch_barriers() { #undef __ #define __ masm-> +void ZBarrierSetAssembler::register_reloc_addresses(GrowableArray
&entries, int begin, int count) { + int formats[] = { + ZBarrierRelocationFormatLoadBadAfterTest, + ZBarrierRelocationFormatStoreBadAfterTest, + ZBarrierRelocationFormatStoreGoodAfterOr, + -1 + }; + int format_idx = 0; + int format = formats[format_idx++]; + for (int i = begin; i < begin + count; i++) { + address addr = entries.at(i); + // reloc addresses occur in 3 groups terminated with a nullptr + if (addr == nullptr) { + assert(format_idx < (int)(sizeof(formats) / sizeof(formats[0])), + "too many reloc groups"); + format = formats[format_idx++]; + } else { + switch(format) { + case ZBarrierRelocationFormatLoadBadAfterTest: + _load_bad_relocations.append(addr); + break; + case ZBarrierRelocationFormatStoreBadAfterTest: + _store_bad_relocations.append(addr); + break; + case ZBarrierRelocationFormatStoreGoodAfterOr: + _store_good_relocations.append(addr); + break; + default: + ShouldNotReachHere(); + break; + } + patch_barrier_relocation(addr, format, true); + } + } + assert(format == -1, "unterminated format list"); +} + +void ZBarrierSetAssembler::retrieve_reloc_addresses(address start, address end, GrowableArray
&entries) { + assert(start != nullptr, "start address must not be null"); + assert(end != nullptr, "end address must not be null"); + assert(start < end, "stub range must not be empty"); + for (int i = 0; i < _load_bad_relocations.length(); i++) { + address addr = _load_bad_relocations.at(i); + assert(addr != nullptr, "load bad reloc address should not be null!"); + if (start <= addr && addr < end) { + entries.append(addr); + } + } + entries.append(nullptr); + for (int i = 0; i < _store_bad_relocations.length(); i++) { + address addr = _store_bad_relocations.at(i); + assert(addr != nullptr, "store bad reloc address should not be null!"); + if (start <= addr && addr < end) { + entries.append(addr); + } + } + entries.append(nullptr); + for (int i = 0; i < _store_good_relocations.length(); i++) { + address addr = _store_good_relocations.at(i); + assert(addr != nullptr, "store good reloc address should not be null!"); + if (start <= addr && addr < end) { + entries.append(addr); + } + } + entries.append(nullptr); +} + + void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { // C1 calls verfy_oop in the middle of barriers, before they have been uncolored // and after being colored. Therefore, we must deal with colored oops as well. diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp index e91e2b9ea20..ce0c4769716 100644 --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp @@ -189,10 +189,14 @@ public: Label& slow_path, Label& slow_path_continuation) const; - void patch_barrier_relocation(address addr, int format); + void patch_barrier_relocation(address addr, int format, bool log = false); void patch_barriers(); + void register_reloc_addresses(GrowableArray
&entries, int begin, int count); + + void retrieve_reloc_addresses(address start, address end, GrowableArray
&entries); + void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); }; diff --git a/src/hotspot/cpu/x86/globals_x86.hpp b/src/hotspot/cpu/x86/globals_x86.hpp index 4f5b6d31e75..6de46752790 100644 --- a/src/hotspot/cpu/x86/globals_x86.hpp +++ b/src/hotspot/cpu/x86/globals_x86.hpp @@ -117,9 +117,6 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong); product(bool, UseIncDec, true, DIAGNOSTIC, \ "Use INC, DEC instructions on x86") \ \ - product(bool, UseNewLongLShift, false, \ - "Use optimized bitwise shift left") \ - \ product(bool, UseAddressNop, false, \ "Use '0F 1F [addr]' NOP instructions on x86 cpus") \ \ @@ -168,16 +165,27 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong); "Perform Ecore Optimization") \ \ /* Minimum array size in bytes to use AVX512 intrinsics */ \ - /* for copy, inflate and fill which don't bail out early based on any */ \ + /* for inflate and fill which don't bail out early based on any */ \ /* condition. When this value is set to zero compare operations like */ \ /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\ product(int, AVX3Threshold, 4096, DIAGNOSTIC, \ "Minimum array size in bytes to use AVX512 intrinsics" \ - "for copy, inflate and fill. When this value is set as zero" \ + "for inflate and fill. When this value is set as zero" \ "compare operations can also use AVX512 intrinsics.") \ range(0, max_jint) \ constraint(AVX3ThresholdConstraintFunc,AfterErgo) \ \ + /* Minimum array size in bytes to use AVX512 intrinsics */ \ + /* for copy and fill which don't bail out early based on any */ \ + /* condition. When this value is set to zero clear operations that */ \ + /* work on memory blocks can also use AVX512 intrinsics. */ \ + product(int, CopyAVX3Threshold, 4096, DIAGNOSTIC, \ + "Minimum array size in bytes to use AVX512 intrinsics" \ + "for copy and fill. 
When this value is set as zero" \ + "clear operations can also use AVX512 intrinsics.") \ + range(0, max_jint) \ + constraint(CopyAVX3ThresholdConstraintFunc,AfterErgo) \ + \ product(bool, IntelJccErratumMitigation, true, DIAGNOSTIC, \ "Turn off JVM mitigations related to Intel micro code " \ "mitigations for the Intel JCC erratum") \ diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index b2ea4143ac4..a38971c86fb 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1392,28 +1392,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) { void InterpreterMacroAssembler::profile_virtual_call(Register receiver, - Register mdp, - bool receiver_can_be_null) { + Register mdp) { if (ProfileInterpreter) { Label profile_continue; // If no method data exists, go to profile_continue. test_method_data_pointer(mdp, profile_continue); - Label skip_receiver_profile; - if (receiver_can_be_null) { - Label not_null; - testptr(receiver, receiver); - jccb(Assembler::notZero, not_null); - // We are making a call. Increment the count for null receiver. - increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); - jmp(skip_receiver_profile); - bind(not_null); - } - // Record the receiver type. profile_receiver_type(receiver, mdp, 0); - bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. 
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size())); diff --git a/src/hotspot/cpu/x86/interp_masm_x86.hpp b/src/hotspot/cpu/x86/interp_masm_x86.hpp index 4114028f78e..dfbd7ab64e0 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.hpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -243,8 +243,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_not_taken_branch(Register mdp); void profile_call(Register mdp); void profile_final_call(Register mdp); - void profile_virtual_call(Register receiver, Register mdp, - bool receiver_can_be_null = false); + void profile_virtual_call(Register receiver, Register mdp); void profile_ret(Register return_bci, Register mdp); void profile_null_seen(Register mdp); void profile_typecheck(Register mdp, Register klass); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index b54f6adc263..5ab3ca339aa 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -385,7 +385,8 @@ void MacroAssembler::warn(const char* msg) { // Windows always allocates space for its register args subq(rsp, frame::arg_reg_save_area_bytes); #endif - lea(c_rarg0, ExternalAddress((address) msg)); + const char* str = (code_section()->scratch_emit()) ? 
msg : AOTCodeCache::add_C_string(msg); + lea(c_rarg0, ExternalAddress((address) str)); call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); #ifdef _WIN64 @@ -961,7 +962,7 @@ void MacroAssembler::call(AddressLiteral entry, Register rscratch) { void MacroAssembler::ic_call(address entry, jint method_index) { RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); // Needs full 64-bit immediate for later patching. - mov64(rax, (int64_t)Universe::non_oop_word()); + Assembler::mov64(rax, (int64_t)Universe::non_oop_word()); call(AddressLiteral(entry, rh)); } @@ -985,12 +986,9 @@ int MacroAssembler::ic_check(int end_alignment) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(temp, receiver); cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); - } else if (UseCompressedClassPointers) { + } else { movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); - } else { - movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); - cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset())); } // if inline cache check fails, then jump to runtime routine @@ -1961,6 +1959,30 @@ void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscrat } } +void MacroAssembler::movhlf(XMMRegister dst, XMMRegister src, Register rscratch) { + if (VM_Version::supports_avx10_2()) { + evmovw(dst, src); + } else { + assert(rscratch != noreg, "missing"); + evmovw(rscratch, src); + evmovw(dst, rscratch); + } +} + +void MacroAssembler::mov64(Register dst, int64_t imm64) { + if (is_uimm32(imm64)) { + movl(dst, checked_cast(imm64)); + } else if (is_simm32(imm64)) { + movq(dst, checked_cast(imm64)); + } else { + Assembler::mov64(dst, imm64); + } +} + +void MacroAssembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) { + Assembler::mov64(dst, imm64, rtype, format); +} + void MacroAssembler::movptr(Register dst, 
Register src) { movq(dst, src); } @@ -1971,13 +1993,7 @@ void MacroAssembler::movptr(Register dst, Address src) { // src should NEVER be a real pointer. Use AddressLiteral for true pointers void MacroAssembler::movptr(Register dst, intptr_t src) { - if (is_uimm32(src)) { - movl(dst, checked_cast(src)); - } else if (is_simm32(src)) { - movq(dst, checked_cast(src)); - } else { - mov64(dst, src); - } + mov64(dst, src); } void MacroAssembler::movptr(Address dst, Register src) { @@ -2656,14 +2672,14 @@ void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscra } } -void MacroAssembler::vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) { +void MacroAssembler::evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) { assert(rscratch != noreg || always_reachable(src), "missing"); if (reachable(src)) { - Assembler::vucomxsd(dst, as_Address(src)); + Assembler::evucomxsd(dst, as_Address(src)); } else { lea(rscratch, src); - Assembler::vucomxsd(dst, Address(rscratch, 0)); + Assembler::evucomxsd(dst, Address(rscratch, 0)); } } @@ -2678,14 +2694,36 @@ void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscra } } -void MacroAssembler::vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) { +void MacroAssembler::evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) { assert(rscratch != noreg || always_reachable(src), "missing"); if (reachable(src)) { - Assembler::vucomxss(dst, as_Address(src)); + Assembler::evucomxss(dst, as_Address(src)); } else { lea(rscratch, src); - Assembler::vucomxss(dst, Address(rscratch, 0)); + Assembler::evucomxss(dst, Address(rscratch, 0)); + } +} + +void MacroAssembler::evucomish(XMMRegister dst, AddressLiteral src, Register rscratch) { + assert(rscratch != noreg || always_reachable(src), "missing"); + + if (reachable(src)) { + Assembler::evucomish(dst, as_Address(src)); + } else { + lea(rscratch, src); + Assembler::evucomish(dst, Address(rscratch, 0)); + } +} + 
+void MacroAssembler::evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch) { + assert(rscratch != noreg || always_reachable(src), "missing"); + + if (reachable(src)) { + Assembler::evucomxsh(dst, as_Address(src)); + } else { + lea(rscratch, src); + Assembler::evucomxsh(dst, Address(rscratch, 0)); } } @@ -5376,11 +5414,9 @@ void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { if (UseCompactObjectHeaders) { load_narrow_klass_compact(dst, src); decode_klass_not_null(dst, tmp); - } else if (UseCompressedClassPointers) { + } else { movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst, tmp); - } else { - movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); } } @@ -5388,12 +5424,8 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { assert(!UseCompactObjectHeaders, "not with compact headers"); assert_different_registers(src, tmp); assert_different_registers(dst, tmp); - if (UseCompressedClassPointers) { - encode_klass_not_null(src, tmp); - movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); - } else { - movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); - } + encode_klass_not_null(src, tmp); + movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); } void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { @@ -5402,10 +5434,8 @@ void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { assert_different_registers(klass, obj, tmp); load_narrow_klass_compact(tmp, obj); cmpl(klass, tmp); - } else if (UseCompressedClassPointers) { - cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); } else { - cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); } } @@ -5416,12 +5446,9 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi load_narrow_klass_compact(tmp1, obj1); load_narrow_klass_compact(tmp2, obj2); 
cmpl(tmp1, tmp2); - } else if (UseCompressedClassPointers) { + } else { movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); - } else { - movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); - cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); } } @@ -5470,10 +5497,8 @@ void MacroAssembler::store_heap_oop_null(Address dst) { void MacroAssembler::store_klass_gap(Register dst, Register src) { assert(!UseCompactObjectHeaders, "Don't use with compact headers"); - if (UseCompressedClassPointers) { - // Store to klass gap in destination - movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); - } + // Store to klass gap in destination + movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); } #ifdef ASSERT @@ -5648,7 +5673,12 @@ void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) BLOCK_COMMENT("encode_and_move_klass_not_null {"); assert_different_registers(src, dst); if (CompressedKlassPointers::base() != nullptr) { - movptr(dst, -(intptr_t)CompressedKlassPointers::base()); + if (AOTCodeCache::is_on_for_dump()) { + movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); + negq(dst); + } else { + movptr(dst, -(intptr_t)CompressedKlassPointers::base()); + } addq(dst, src); } else { movptr(dst, src); @@ -5663,7 +5693,6 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { BLOCK_COMMENT("decode_klass_not_null {"); assert_different_registers(r, tmp); // Note: it will change flags - assert(UseCompressedClassPointers, "should only be used for compressed headers"); // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. 
@@ -5685,7 +5714,6 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) BLOCK_COMMENT("decode_and_move_klass_not_null {"); assert_different_registers(src, dst); // Note: it will change flags - assert (UseCompressedClassPointers, "should only be used for compressed headers"); // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. @@ -5698,7 +5726,11 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) } else { if (CompressedKlassPointers::shift() <= Address::times_8) { if (CompressedKlassPointers::base() != nullptr) { - movptr(dst, (intptr_t)CompressedKlassPointers::base()); + if (AOTCodeCache::is_on_for_dump()) { + movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); + } else { + movptr(dst, (intptr_t)CompressedKlassPointers::base()); + } } else { xorq(dst, dst); } @@ -5710,9 +5742,14 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) } } else { if (CompressedKlassPointers::base() != nullptr) { - const intptr_t base_right_shifted = - (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); - movptr(dst, base_right_shifted); + if (AOTCodeCache::is_on_for_dump()) { + movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); + shrq(dst, CompressedKlassPointers::shift()); + } else { + const intptr_t base_right_shifted = + (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); + movptr(dst, base_right_shifted); + } } else { xorq(dst, dst); } @@ -5742,7 +5779,6 @@ void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { } void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int klass_index = 
oop_recorder()->find_index(k); RelocationHolder rspec = metadata_Relocation::spec(klass_index); @@ -5750,7 +5786,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { } void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int klass_index = oop_recorder()->find_index(k); RelocationHolder rspec = metadata_Relocation::spec(klass_index); @@ -5776,7 +5811,6 @@ void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { } void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int klass_index = oop_recorder()->find_index(k); RelocationHolder rspec = metadata_Relocation::spec(klass_index); @@ -5784,7 +5818,6 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { } void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { - assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int klass_index = oop_recorder()->find_index(k); RelocationHolder rspec = metadata_Relocation::spec(klass_index); @@ -5793,7 +5826,7 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { void MacroAssembler::reinit_heapbase() { if (UseCompressedOops) { - if (Universe::heap() != nullptr) { + if (Universe::heap() != nullptr && !AOTCodeCache::is_on_for_dump()) { if (CompressedOops::base() == nullptr) { MacroAssembler::xorptr(r12_heapbase, r12_heapbase); } else { @@ -5812,7 +5845,7 @@ void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, X // cnt - number of qwords (8-byte words). // base - start address, qword aligned. 
Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; - bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); + bool use64byteVector = (MaxVectorSize == 64) && (CopyAVX3Threshold == 0); if (use64byteVector) { vpxor(xtmp, xtmp, xtmp, AVX_512bit); } else if (MaxVectorSize >= 32) { @@ -5876,7 +5909,7 @@ void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, X // Clearing constant sized memory using YMM/ZMM registers. void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { assert(UseAVX > 2 && VM_Version::supports_avx512vl(), ""); - bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); + bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0); int vector64_count = (cnt & (~0x7)) >> 3; cnt = cnt & 0x7; @@ -6101,8 +6134,8 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned, // Fill 64-byte chunks Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; - // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 - cmpptr(count, VM_Version::avx3_threshold()); + // If number of bytes to fill < CopyAVX3Threshold, perform fill using AVX2 + cmpptr(count, CopyAVX3Threshold); jccb(Assembler::below, L_check_fill_64_bytes_avx2); vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); @@ -9177,7 +9210,7 @@ void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XM case T_FLOAT: evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break; case T_DOUBLE: - evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break; + evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break; default: fatal("Unexpected type argument %s", type2name(type)); break; } @@ -9475,7 +9508,6 @@ void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register va Label L_fill_zmm_sequence; int 
shift = -1; - int avx3threshold = VM_Version::avx3_threshold(); switch(type) { case T_BYTE: shift = 0; break; @@ -9491,10 +9523,10 @@ void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register va fatal("Unhandled type: %s\n", type2name(type)); } - if ((avx3threshold != 0) || (MaxVectorSize == 32)) { + if ((CopyAVX3Threshold != 0) || (MaxVectorSize == 32)) { if (MaxVectorSize == 64) { - cmpq(count, avx3threshold >> shift); + cmpq(count, CopyAVX3Threshold >> shift); jcc(Assembler::greater, L_fill_zmm_sequence); } diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 5c049f710e2..021d2943ee8 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -162,6 +162,8 @@ class MacroAssembler: public Assembler { void incrementq(AddressLiteral dst, Register rscratch = noreg); + void movhlf(XMMRegister dst, XMMRegister src, Register rscratch = noreg); + // Support optimal SSE move instructions. void movflt(XMMRegister dst, XMMRegister src) { if (dst-> encoding() == src->encoding()) return; @@ -351,8 +353,7 @@ class MacroAssembler: public Assembler { void load_klass(Register dst, Register src, Register tmp); void store_klass(Register dst, Register src, Register tmp); - // Compares the Klass pointer of an object to a given Klass (which might be narrow, - // depending on UseCompressedClassPointers). + // Compares the narrow Klass pointer of an object to a given narrow Klass. void cmp_klass(Register klass, Register obj, Register tmp); // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags. 
@@ -1309,21 +1310,29 @@ public: void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + void evucomish(XMMRegister dst, XMMRegister src) { Assembler::evucomish(dst, src); } + void evucomish(XMMRegister dst, Address src) { Assembler::evucomish(dst, src); } + void evucomish(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + + void evucomxsh(XMMRegister dst, XMMRegister src) { Assembler::evucomxsh(dst, src); } + void evucomxsh(XMMRegister dst, Address src) { Assembler::evucomxsh(dst, src); } + void evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void vucomxss(XMMRegister dst, XMMRegister src) { Assembler::vucomxss(dst, src); } - void vucomxss(XMMRegister dst, Address src) { Assembler::vucomxss(dst, src); } - void vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + void evucomxss(XMMRegister dst, XMMRegister src) { Assembler::evucomxss(dst, src); } + void evucomxss(XMMRegister dst, Address src) { Assembler::evucomxss(dst, src); } + void evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void vucomxsd(XMMRegister dst, XMMRegister src) { Assembler::vucomxsd(dst, src); } - void vucomxsd(XMMRegister dst, Address src) { Assembler::vucomxsd(dst, src); } - void vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + void evucomxsd(XMMRegister dst, XMMRegister src) { Assembler::evucomxsd(dst, src); } + 
void evucomxsd(XMMRegister dst, Address src) { Assembler::evucomxsd(dst, src); } + void evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values void xorpd(XMMRegister dst, XMMRegister src); @@ -1869,6 +1878,9 @@ public: void mov_metadata(Register dst, Metadata* obj); void mov_metadata(Address dst, Metadata* obj, Register rscratch); + void mov64(Register dst, int64_t imm64); + void mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format); + void movptr(Register dst, Register src); void movptr(Register dst, Address src); void movptr(Register dst, AddressLiteral src); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_sha.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_sha.cpp index 9f0232075cd..401d5dc22cc 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86_sha.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86_sha.cpp @@ -242,7 +242,6 @@ void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegiste Label done_hash, loop0; address K256 = StubRoutines::x86::k256_addr(); - address pshuffle_byte_flip_mask = StubRoutines::x86::pshuffle_byte_flip_mask_addr(); movdqu(state0, Address(state, 0)); movdqu(state1, Address(state, 16)); @@ -253,7 +252,7 @@ void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegiste palignr(state0, state1, 8); pblendw(state1, msgtmp4, 0xF0); - movdqu(shuf_mask, ExternalAddress(pshuffle_byte_flip_mask)); + movdqu(shuf_mask, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_addr())); lea(rax, ExternalAddress(K256)); bind(loop0); @@ -661,8 +660,6 @@ void MacroAssembler::sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegiste compute_size1, compute_size_end1; address K256_W = StubRoutines::x86::k256_W_addr(); - address pshuffle_byte_flip_mask = StubRoutines::x86::pshuffle_byte_flip_mask_addr(); - address pshuffle_byte_flip_mask_addr = nullptr; const XMMRegister& SHUF_00BA = xmm10; // ymm10: 
shuffle xBxA -> 00BA const XMMRegister& SHUF_DC00 = xmm12; // ymm12: shuffle xDxC -> DC00 @@ -791,10 +788,14 @@ enum { // load g - r10 after it is used as scratch movl(h, Address(CTX, 4*7)); - pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask; - vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr + 0)); // [PSHUFFLE_BYTE_FLIP_MASK wrt rip] - vmovdqu(SHUF_00BA, ExternalAddress(pshuffle_byte_flip_mask_addr + 32)); // [_SHUF_00BA wrt rip] - vmovdqu(SHUF_DC00, ExternalAddress(pshuffle_byte_flip_mask_addr + 64)); // [_SHUF_DC00 wrt rip] + // the three successive pshuffle_byte_flip_mask stub entries should + // be offset by 32 bytes + assert(StubRoutines::x86::pshuffle_byte_flip_mask_addr() + 32 == StubRoutines::x86::pshuffle_byte_flip_mask_00ba_addr(), "sanity"); + assert(StubRoutines::x86::pshuffle_byte_flip_mask_addr() + 64 == StubRoutines::x86::pshuffle_byte_flip_mask_dc00_addr(), "sanity"); + + vmovdqu(BYTE_FLIP_MASK, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_addr())); // [PSHUFFLE_BYTE_FLIP_MASK wrt rip] + vmovdqu(SHUF_00BA, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_00ba_addr())); // [_SHUF_00BA wrt rip] + vmovdqu(SHUF_DC00, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_dc00_addr())); // [_SHUF_DC00 wrt rip] movl(g, Address(CTX, 4*6)); @@ -953,11 +954,9 @@ bind(only_one_block); // load g - r10 after use as scratch movl(h, Address(CTX, 4*7)); // 0x5be0cd19 - - pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask; - vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr + 0)); // [PSHUFFLE_BYTE_FLIP_MASK wrt rip] - vmovdqu(SHUF_00BA, ExternalAddress(pshuffle_byte_flip_mask_addr + 32)); // [_SHUF_00BA wrt rip] - vmovdqu(SHUF_DC00, ExternalAddress(pshuffle_byte_flip_mask_addr + 64)); // [_SHUF_DC00 wrt rip] + vmovdqu(BYTE_FLIP_MASK, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_addr())); // [PSHUFFLE_BYTE_FLIP_MASK wrt rip] + vmovdqu(SHUF_00BA, 
ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_00ba_addr())); // [_SHUF_00BA wrt rip] + vmovdqu(SHUF_DC00, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_dc00_addr())); // [_SHUF_DC00 wrt rip] movl(g, Address(CTX, 4*6)); // 0x1f83d9ab @@ -1346,9 +1345,12 @@ void MacroAssembler::sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegiste // load g - r10 after it is used as scratch movq(h, Address(CTX, 8 * 7)); - pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask_sha512; - vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr + 0)); // PSHUFFLE_BYTE_FLIP_MASK wrt rip - vmovdqu(YMM_MASK_LO, ExternalAddress(pshuffle_byte_flip_mask_addr + 32)); + // the two successive pshuffle_byte_flip_mask_sha512 stub entries should + // be offset by 32 bytes + assert(StubRoutines::x86::pshuffle_byte_flip_mask_addr_sha512() + 32 == StubRoutines::x86::pshuffle_byte_flip_mask_ymm_lo_addr_sha512(), "sanity"); + + vmovdqu(BYTE_FLIP_MASK, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_addr_sha512())); // PSHUFFLE_BYTE_FLIP_MASK wrt rip + vmovdqu(YMM_MASK_LO, ExternalAddress(StubRoutines::x86::pshuffle_byte_flip_mask_ymm_lo_addr_sha512())); // MASK_YMM_LO wrt rip movq(g, Address(CTX, 8 * 6)); diff --git a/src/hotspot/cpu/x86/matcher_x86.hpp b/src/hotspot/cpu/x86/matcher_x86.hpp index f7973a8564e..62a5d2827bc 100644 --- a/src/hotspot/cpu/x86/matcher_x86.hpp +++ b/src/hotspot/cpu/x86/matcher_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,6 @@ } static bool narrow_klass_use_complex_address() { - assert(UseCompressedClassPointers, "only for compressed klass code"); return (CompressedKlassPointers::shift() <= 3); } diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp index 54376c6ad9a..5b15444bc32 100644 --- a/src/hotspot/cpu/x86/methodHandles_x86.cpp +++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp @@ -110,14 +110,13 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); __ cmpl(temp, ref_kind); __ jcc(Assembler::equal, L); - { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); - jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); - if (ref_kind == JVM_REF_invokeVirtual || - ref_kind == JVM_REF_invokeSpecial) - // could do this for all ref_kinds, but would explode assembly code size - trace_method_handle(_masm, buf); - __ STOP(buf); + const char* msg = ref_kind_to_verify_msg(ref_kind); + if (ref_kind == JVM_REF_invokeVirtual || + ref_kind == JVM_REF_invokeSpecial) { + // could do this for all ref_kinds, but would explode assembly code size + trace_method_handle(_masm, msg); } + __ STOP(msg); BLOCK_COMMENT("} verify_ref_kind"); __ bind(L); } diff --git a/src/hotspot/cpu/x86/stubDeclarations_x86.hpp b/src/hotspot/cpu/x86/stubDeclarations_x86.hpp index 971c8fd3c44..24886deb3c5 100644 --- a/src/hotspot/cpu/x86/stubDeclarations_x86.hpp +++ b/src/hotspot/cpu/x86/stubDeclarations_x86.hpp @@ -29,14 +29,16 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 500) \ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 
PRODUCT_ONLY(20000) NOT_PRODUCT(21000) WINDOWS_ONLY(+1000)) \ do_stub(initial, verify_mxcsr) \ do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \ @@ -65,14 +67,18 @@ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 3000) \ +// count needed for declaration of vector_iota_indices stub +#define VECTOR_IOTA_COUNT 6 #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 120000 WINDOWS_ONLY(+2000)) \ do_stub(compiler, vector_float_sign_mask) \ do_arch_entry(x86, compiler, vector_float_sign_mask, \ @@ -126,8 +132,9 @@ do_arch_entry(x86, compiler, vector_long_sign_mask, \ vector_long_sign_mask, vector_long_sign_mask) \ do_stub(compiler, vector_iota_indices) \ - do_arch_entry(x86, compiler, vector_iota_indices, \ - vector_iota_indices, vector_iota_indices) \ + do_arch_entry_array(x86, compiler, vector_iota_indices, \ + vector_iota_indices, vector_iota_indices, \ + VECTOR_IOTA_COUNT) \ do_stub(compiler, vector_count_leading_zeros_lut) \ do_arch_entry(x86, compiler, vector_count_leading_zeros_lut, \ vector_count_leading_zeros_lut, \ @@ -161,6 +168,12 @@ do_arch_entry(x86, compiler, pshuffle_byte_flip_mask, \ pshuffle_byte_flip_mask_addr, \ pshuffle_byte_flip_mask_addr) \ + do_arch_entry(x86, compiler, pshuffle_byte_flip_mask, \ + pshuffle_byte_flip_mask_00ba_addr, \ + pshuffle_byte_flip_mask_00ba_addr) \ + do_arch_entry(x86, compiler, pshuffle_byte_flip_mask, \ + pshuffle_byte_flip_mask_dc00_addr, \ + pshuffle_byte_flip_mask_dc00_addr) \ /* x86_64 exposes these 3 stubs via a generic entry array */ \ /* other arches use arch-specific entries */ \ /* this really needs rationalising */ \ @@ -171,6 +184,9 @@ do_arch_entry(x86, compiler, pshuffle_byte_flip_mask_sha512, \ pshuffle_byte_flip_mask_addr_sha512, \ 
pshuffle_byte_flip_mask_addr_sha512) \ + do_arch_entry(x86, compiler, pshuffle_byte_flip_mask_sha512, \ + pshuffle_byte_flip_mask_ymm_lo_addr_sha512, \ + pshuffle_byte_flip_mask_ymm_lo_addr_sha512) \ do_stub(compiler, compress_perm_table32) \ do_arch_entry(x86, compiler, compress_perm_table32, \ compress_perm_table32, compress_perm_table32) \ @@ -241,7 +257,8 @@ #define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 33000 \ WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \ diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index efb0411aa39..993d1964034 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -188,8 +188,18 @@ address StubGenerator::generate_call_stub(address& return_address) { (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); StubId stub_id = StubId::stubgen_call_stub_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 2, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == 1, "expected 1 extra entry"); + return_address = entries.at(0); + return start; + } + StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // same as in generate_catch_exception()! const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); @@ -298,6 +308,7 @@ address StubGenerator::generate_call_stub(address& return_address) { BLOCK_COMMENT("call_stub_return_address:"); return_address = __ pc(); + entries.append(return_address); // store result depending on type (everything that is not // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) @@ -394,6 +405,9 @@ address StubGenerator::generate_call_stub(address& return_address) { __ movdbl(Address(c_rarg0, 0), xmm0); __ jmp(exit); + // record the stub entry and end plus the auxiliary entry + store_archive_data(stub_id, start, __ pc(), &entries); + return start; } @@ -411,8 +425,15 @@ address StubGenerator::generate_call_stub(address& return_address) { address StubGenerator::generate_catch_exception() { StubId stub_id = StubId::stubgen_catch_exception_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // same as in generate_call_stub(): const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); @@ -442,7 +463,9 @@ address StubGenerator::generate_catch_exception() { __ verify_oop(rax); __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); - __ lea(rscratch1, ExternalAddress((address)__FILE__)); + // special case -- add file name string to AOT address table + address file = (address)AOTCodeCache::add_C_string(__FILE__); + __ 
lea(rscratch1, ExternalAddress(file)); __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); @@ -451,6 +474,9 @@ address StubGenerator::generate_catch_exception() { "_call_stub_return_address must have been generated before"); __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -467,8 +493,14 @@ address StubGenerator::generate_catch_exception() { address StubGenerator::generate_forward_exception() { StubId stub_id = StubId::stubgen_forward_exception_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Upon entry, the sp points to the return address returning into // Java (interpreted or compiled) code; i.e., the return address @@ -521,6 +553,9 @@ address StubGenerator::generate_forward_exception() { __ verify_oop(rax); __ jmp(rbx); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -531,12 +566,21 @@ address StubGenerator::generate_forward_exception() { // Result: address StubGenerator::generate_orderaccess_fence() { StubId stub_id = StubId::stubgen_fence_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ membar(Assembler::StoreLoad); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -550,8 +594,14 @@ address StubGenerator::generate_orderaccess_fence() { address StubGenerator::generate_verify_mxcsr() 
{ StubId stub_id = StubId::stubgen_verify_mxcsr_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Address mxcsr_save(rsp, 0); @@ -574,15 +624,24 @@ address StubGenerator::generate_verify_mxcsr() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_f2i_fixup() { StubId stub_id = StubId::stubgen_f2i_fixup_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); Address inout(rsp, 5 * wordSize); // return address + 4 saves - address start = __ pc(); + start = __ pc(); Label L; @@ -613,14 +672,23 @@ address StubGenerator::generate_f2i_fixup() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_f2l_fixup() { StubId stub_id = StubId::stubgen_f2l_fixup_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); Address inout(rsp, 5 * wordSize); // return address + 4 saves - address start = __ pc(); + start = __ pc(); Label L; @@ -651,15 +719,24 @@ address StubGenerator::generate_f2l_fixup() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_d2i_fixup() { StubId stub_id = StubId::stubgen_d2i_fixup_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if 
(start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); Address inout(rsp, 6 * wordSize); // return address + 5 saves - address start = __ pc(); + start = __ pc(); Label L; @@ -699,15 +776,24 @@ address StubGenerator::generate_d2i_fixup() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_d2l_fixup() { StubId stub_id = StubId::stubgen_d2l_fixup_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); Address inout(rsp, 6 * wordSize); // return address + 5 saves - address start = __ pc(); + start = __ pc(); Label L; @@ -747,14 +833,23 @@ address StubGenerator::generate_d2l_fixup() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_count_leading_zeros_lut() { - __ align64(); StubId stub_id = StubId::stubgen_vector_count_leading_zeros_lut_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0101010102020304, relocInfo::none); __ emit_data64(0x0000000000000000, relocInfo::none); @@ -765,14 +860,23 @@ address StubGenerator::generate_count_leading_zeros_lut() { __ emit_data64(0x0101010102020304, relocInfo::none); __ emit_data64(0x0000000000000000, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_popcount_avx_lut() { - __ align64(); StubId stub_id = StubId::stubgen_vector_popcount_lut_id; + int entry_count = StubInfo::entry_count(stub_id); + 
assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0302020102010100, relocInfo::none); __ emit_data64(0x0403030203020201, relocInfo::none); @@ -783,14 +887,30 @@ address StubGenerator::generate_popcount_avx_lut() { __ emit_data64(0x0302020102010100, relocInfo::none); __ emit_data64(0x0403030203020201, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } -address StubGenerator::generate_iota_indices() { - __ align(CodeEntryAlignment); +void StubGenerator::generate_iota_indices() { StubId stub_id = StubId::stubgen_vector_iota_indices_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == VECTOR_IOTA_COUNT, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == VECTOR_IOTA_COUNT - 1, + "unexpected extra entry count %d", entries.length()); + StubRoutines::x86::_vector_iota_indices[0] = start; + for (int i = 1; i < VECTOR_IOTA_COUNT; i++) { + StubRoutines::x86::_vector_iota_indices[i] = entries.at(i - 1); + } + return; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // B __ emit_data64(0x0706050403020100, relocInfo::none); __ emit_data64(0x0F0E0D0C0B0A0908, relocInfo::none); @@ -800,6 +920,7 @@ address StubGenerator::generate_iota_indices() { __ emit_data64(0x2F2E2D2C2B2A2928, relocInfo::none); __ emit_data64(0x3736353433323130, relocInfo::none); __ emit_data64(0x3F3E3D3C3B3A3938, relocInfo::none); + entries.append(__ pc()); // W __ emit_data64(0x0003000200010000, relocInfo::none); __ emit_data64(0x0007000600050004, relocInfo::none); @@ -809,6 +930,7 @@ address StubGenerator::generate_iota_indices() { __ emit_data64(0x0017001600150014, relocInfo::none); __ emit_data64(0x001B001A00190018, relocInfo::none); __ emit_data64(0x001F001E001D001C, relocInfo::none); + entries.append(__ pc()); // D __ emit_data64(0x0000000100000000, relocInfo::none); __ emit_data64(0x0000000300000002, relocInfo::none); @@ -818,6 +940,7 @@ address StubGenerator::generate_iota_indices() { __ emit_data64(0x0000000B0000000A, relocInfo::none); __ emit_data64(0x0000000D0000000C, relocInfo::none); __ emit_data64(0x0000000F0000000E, relocInfo::none); + entries.append(__ pc()); // Q __ emit_data64(0x0000000000000000, relocInfo::none); __ emit_data64(0x0000000000000001, relocInfo::none); @@ -827,6 +950,7 @@ address StubGenerator::generate_iota_indices() { __ emit_data64(0x0000000000000005, relocInfo::none); __ emit_data64(0x0000000000000006, relocInfo::none); __ 
emit_data64(0x0000000000000007, relocInfo::none); + entries.append(__ pc()); // D - FP __ emit_data64(0x3F80000000000000, relocInfo::none); // 0.0f, 1.0f __ emit_data64(0x4040000040000000, relocInfo::none); // 2.0f, 3.0f @@ -836,6 +960,7 @@ address StubGenerator::generate_iota_indices() { __ emit_data64(0x4130000041200000, relocInfo::none); // 10.0f, 11.0f __ emit_data64(0x4150000041400000, relocInfo::none); // 12.0f, 13.0f __ emit_data64(0x4170000041600000, relocInfo::none); // 14.0f, 15.0f + entries.append(__ pc()); // Q - FP __ emit_data64(0x0000000000000000, relocInfo::none); // 0.0d __ emit_data64(0x3FF0000000000000, relocInfo::none); // 1.0d @@ -845,14 +970,30 @@ address StubGenerator::generate_iota_indices() { __ emit_data64(0x4014000000000000, relocInfo::none); // 5.0d __ emit_data64(0x4018000000000000, relocInfo::none); // 6.0d __ emit_data64(0x401c000000000000, relocInfo::none); // 7.0d - return start; + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc(), &entries); + + // install the entry addresses in the entry array + assert(entries.length() == entry_count - 1, + "unexpected entries count %d", entries.length()); + StubRoutines::x86::_vector_iota_indices[0] = start; + for (int i = 1; i < VECTOR_IOTA_COUNT; i++) { + StubRoutines::x86::_vector_iota_indices[i] = entries.at(i - 1); + } } address StubGenerator::generate_vector_reverse_bit_lut() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vector_reverse_bit_lut_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0E060A020C040800, relocInfo::none); __ emit_data64(0x0F070B030D050901, relocInfo::none); @@ -863,14 +1004,23 @@ address StubGenerator::generate_vector_reverse_bit_lut() { __ 
emit_data64(0x0E060A020C040800, relocInfo::none); __ emit_data64(0x0F070B030D050901, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_vector_reverse_byte_perm_mask_long() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_long_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0001020304050607, relocInfo::none); __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none); @@ -881,14 +1031,23 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_long() { __ emit_data64(0x0001020304050607, relocInfo::none); __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_vector_reverse_byte_perm_mask_int() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_int_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0405060700010203, relocInfo::none); __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none); @@ -899,14 +1058,23 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_int() { __ emit_data64(0x0405060700010203, relocInfo::none); __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address 
StubGenerator::generate_vector_reverse_byte_perm_mask_short() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_short_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0607040502030001, relocInfo::none); __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none); @@ -917,31 +1085,52 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_short() { __ emit_data64(0x0607040502030001, relocInfo::none); __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_vector_byte_shuffle_mask() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vector_byte_shuffle_mask_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x7070707070707070, relocInfo::none); __ emit_data64(0x7070707070707070, relocInfo::none); __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none); __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_fp_mask(StubId stub_id, int64_t mask) { + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + 
start = __ pc(); __ emit_data64( mask, relocInfo::none ); __ emit_data64( mask, relocInfo::none ); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -957,9 +1146,15 @@ address StubGenerator::generate_compress_perm_table(StubId stub_id) { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); if (esize == 32) { // Loop to generate 256 x 8 int compression permute index table. A row is // accessed using 8 bit index computed using vector mask. An entry in @@ -997,6 +1192,9 @@ address StubGenerator::generate_compress_perm_table(StubId stub_id) { } } } + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1012,9 +1210,15 @@ address StubGenerator::generate_expand_perm_table(StubId stub_id) { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); if (esize == 32) { // Loop to generate 256 x 8 int expand permute index table. A row is accessed // using 8 bit index computed using vector mask. 
An entry in a row holds either @@ -1050,13 +1254,22 @@ address StubGenerator::generate_expand_perm_table(StubId stub_id) { } } } + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_vector_mask(StubId stub_id, int64_t mask) { + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(mask, relocInfo::none); __ emit_data64(mask, relocInfo::none); @@ -1067,14 +1280,23 @@ address StubGenerator::generate_vector_mask(StubId stub_id, int64_t mask) { __ emit_data64(mask, relocInfo::none); __ emit_data64(mask, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_vector_byte_perm_mask() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vector_byte_perm_mask_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0000000000000001, relocInfo::none); __ emit_data64(0x0000000000000003, relocInfo::none); @@ -1085,13 +1307,22 @@ address StubGenerator::generate_vector_byte_perm_mask() { __ emit_data64(0x0000000000000004, relocInfo::none); __ emit_data64(0x0000000000000006, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_vector_fp_mask(StubId stub_id, int64_t mask) { + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = 
load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(mask, relocInfo::none); __ emit_data64(mask, relocInfo::none); @@ -1102,6 +1333,9 @@ address StubGenerator::generate_vector_fp_mask(StubId stub_id, int64_t mask) { __ emit_data64(mask, relocInfo::none); __ emit_data64(mask, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1110,9 +1344,15 @@ address StubGenerator::generate_vector_custom_i32(StubId stub_id, Assembler::Avx int32_t val4, int32_t val5, int32_t val6, int32_t val7, int32_t val8, int32_t val9, int32_t val10, int32_t val11, int32_t val12, int32_t val13, int32_t val14, int32_t val15) { + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(len != Assembler::AVX_NoVec, "vector len must be specified"); __ emit_data(val0, relocInfo::none, 0); @@ -1135,6 +1375,9 @@ address StubGenerator::generate_vector_custom_i32(StubId stub_id, Assembler::Avx __ emit_data(val15, relocInfo::none, 0); } } + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1156,8 +1399,14 @@ address StubGenerator::generate_vector_custom_i32(StubId stub_id, Assembler::Avx // * = popped on exit address StubGenerator::generate_verify_oop() { StubId stub_id = StubId::stubgen_verify_oop_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label exit, error; @@ -1235,6 
+1484,9 @@ address StubGenerator::generate_verify_oop() { __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); __ hlt(); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1350,35 +1602,46 @@ void StubGenerator::restore_argument_regs(BasicType type) { address StubGenerator::generate_data_cache_writeback() { const Register src = c_rarg0; // source address - - __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_data_cache_writeback_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); __ cache_wb(Address(src, 0)); __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_data_cache_writeback_sync() { const Register is_pre = c_rarg0; // pre or post sync - - __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); // pre wbsync is a no-op // post wbsync translates to an sfence Label skip; - address start = __ pc(); + start = __ pc(); __ enter(); __ cmpl(is_pre, 0); @@ -1388,6 +1651,9 @@ address StubGenerator::generate_data_cache_writeback_sync() { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1405,9 +1671,15 @@ address StubGenerator::generate_md5_implCompress(StubId stub_id) { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + 
assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register buf_param = r15; const Address state_param(rsp, 0 * wordSize); @@ -1437,30 +1709,51 @@ address StubGenerator::generate_md5_implCompress(StubId stub_id) { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_upper_word_mask() { - __ align64(); StubId stub_id = StubId::stubgen_upper_word_mask_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0000000000000000, relocInfo::none); __ emit_data64(0xFFFFFFFF00000000, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_shuffle_byte_flip_mask() { - __ align64(); StubId stub_id = StubId::stubgen_shuffle_byte_flip_mask_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); __ emit_data64(0x0001020304050607, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1478,9 +1771,15 @@ address StubGenerator::generate_sha1_implCompress(StubId stub_id) { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + 
address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -1509,15 +1808,32 @@ address StubGenerator::generate_sha1_implCompress(StubId stub_id) { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } -address StubGenerator::generate_pshuffle_byte_flip_mask() { - __ align64(); +address StubGenerator::generate_pshuffle_byte_flip_mask(address& entry_00ba, address& entry_dc00) { StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 3, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == entry_count - 1, + "unexpected extra entry count %d", entries.length()); + entry_00ba = entries.at(0); + entry_dc00 = entries.at(1); + assert(VM_Version::supports_avx2() == (entry_00ba != nullptr && entry_dc00 != nullptr), + "entries cannot be null when avx2 is enabled"); + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); - + start = __ pc(); + address entry2 = nullptr; + address entry3 = nullptr; __ emit_data64(0x0405060700010203, relocInfo::none); __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); @@ -1525,37 +1841,66 @@ address StubGenerator::generate_pshuffle_byte_flip_mask() { __ emit_data64(0x0405060700010203, relocInfo::none); // second copy __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); // _SHUF_00BA + entry2 = __ pc(); __ emit_data64(0x0b0a090803020100, relocInfo::none); __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); __ emit_data64(0x0b0a090803020100, relocInfo::none); __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); // _SHUF_DC00 + entry3 = __ pc(); __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); __ emit_data64(0x0b0a090803020100, relocInfo::none); __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); __ emit_data64(0x0b0a090803020100, relocInfo::none); } + // have to track the 2nd and 3rd entries even if they are null + entry_00ba = entry2; + entries.push(entry_00ba); + entry_dc00 = entry3; + entries.push(entry_dc00); + + // record the stub entry and end plus all the auxiliary entries + store_archive_data(stub_id, start, __ pc(), &entries); return start; } //Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
-address StubGenerator::generate_pshuffle_byte_flip_mask_sha512() { - __ align32(); +address StubGenerator::generate_pshuffle_byte_flip_mask_sha512(address& entry_ymm_lo) { StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_sha512_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 2, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == entry_count - 1, + "unexpected extra entry count %d", entries.length()); + entry_ymm_lo = entries.at(0); + assert(VM_Version::supports_avx2() == (entry_ymm_lo != nullptr), + "entry cannot be null when avx2 is enabled"); + return start; + } + __ align32(); StubCodeMark mark(this, stub_id); - address start = __ pc(); - + start = __ pc(); + address entry2 = nullptr; if (VM_Version::supports_avx2()) { __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); __ emit_data64(0x1011121314151617, relocInfo::none); __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none); + // capture 2nd entry + entry2 = __ pc(); __ emit_data64(0x0000000000000000, relocInfo::none); //MASK_YMM_LO __ emit_data64(0x0000000000000000, relocInfo::none); __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); } + // have to track the 2nd entry even if it is null + entry_ymm_lo = entry2; + entries.push(entry2); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc(), &entries); return start; } @@ -1575,9 +1920,15 @@ address StubGenerator::generate_sha256_implCompress(StubId stub_id) { ShouldNotReachHere(); } assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -1612,6 +1963,9 @@ address StubGenerator::generate_sha256_implCompress(StubId stub_id) { __ leave(); __ ret(0); + // record 
the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1629,9 +1983,15 @@ address StubGenerator::generate_sha512_implCompress(StubId stub_id) { } assert(VM_Version::supports_avx2(), ""); assert(VM_Version::supports_bmi2() || VM_Version::supports_sha512(), ""); + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Register buf = c_rarg0; Register state = c_rarg1; @@ -1660,14 +2020,23 @@ address StubGenerator::generate_sha512_implCompress(StubId stub_id) { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_shuffle_addr() { - __ align64(); StubId stub_id = StubId::stubgen_shuffle_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -1680,42 +2049,69 @@ address StubGenerator::base64_shuffle_addr() { __ emit_data64(0x2829272825262425, relocInfo::none); __ emit_data64(0x2e2f2d2e2b2c2a2b, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_avx2_shuffle_addr() { - __ align32(); StubId stub_id = StubId::stubgen_avx2_shuffle_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align32(); StubCodeMark mark(this, 
stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x0809070805060405, relocInfo::none); __ emit_data64(0x0e0f0d0e0b0c0a0b, relocInfo::none); __ emit_data64(0x0405030401020001, relocInfo::none); __ emit_data64(0x0a0b090a07080607, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_avx2_input_mask_addr() { - __ align32(); StubId stub_id = StubId::stubgen_avx2_input_mask_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align32(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0x8000000000000000, relocInfo::none); __ emit_data64(0x8000000080000000, relocInfo::none); __ emit_data64(0x8000000080000000, relocInfo::none); __ emit_data64(0x8000000080000000, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_avx2_lut_addr() { - __ align32(); StubId stub_id = StubId::stubgen_avx2_lut_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align32(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none); __ emit_data64(0x0000f0edfcfcfcfc, relocInfo::none); @@ -1728,14 +2124,23 @@ address StubGenerator::base64_avx2_lut_addr() { __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none); __ emit_data64(0x000020effcfcfcfc, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_encoding_table_addr() { - __ align64(); StubId stub_id = 
StubId::stubgen_encoding_table_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); __ emit_data64(0x4847464544434241, relocInfo::none); @@ -1757,6 +2162,9 @@ address StubGenerator::base64_encoding_table_addr() { __ emit_data64(0x333231307a797877, relocInfo::none); __ emit_data64(0x5f2d393837363534, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1766,10 +2174,16 @@ address StubGenerator::base64_encoding_table_addr() { // boolean isURL) { address StubGenerator::generate_base64_encodeBlock() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_base64_encodeBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); @@ -2144,15 +2558,24 @@ address StubGenerator::generate_base64_encodeBlock() __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } // base64 AVX512vbmi tables address StubGenerator::base64_vbmi_lookup_lo_addr() { - __ align64(); StubId stub_id = StubId::stubgen_lookup_lo_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) 
== 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2165,14 +2588,23 @@ address StubGenerator::base64_vbmi_lookup_lo_addr() { __ emit_data64(0x3b3a393837363534, relocInfo::none); __ emit_data64(0x8080808080803d3c, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_vbmi_lookup_hi_addr() { - __ align64(); StubId stub_id = StubId::stubgen_lookup_hi_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2185,13 +2617,22 @@ address StubGenerator::base64_vbmi_lookup_hi_addr() { __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none); __ emit_data64(0x8080808080333231, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_vbmi_lookup_lo_url_addr() { - __ align64(); StubId stub_id = StubId::stubgen_lookup_lo_base64url_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2204,14 +2645,23 @@ address StubGenerator::base64_vbmi_lookup_lo_url_addr() { __ emit_data64(0x3b3a393837363534, relocInfo::none); __ emit_data64(0x8080808080803d3c, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address 
StubGenerator::base64_vbmi_lookup_hi_url_addr() { - __ align64(); StubId stub_id = StubId::stubgen_lookup_hi_base64url_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2224,14 +2674,23 @@ address StubGenerator::base64_vbmi_lookup_hi_url_addr() { __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none); __ emit_data64(0x8080808080333231, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_vbmi_pack_vec_addr() { - __ align64(); StubId stub_id = StubId::stubgen_pack_vec_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2244,14 +2703,23 @@ address StubGenerator::base64_vbmi_pack_vec_addr() { __ emit_data64(0x0000000000000000, relocInfo::none); __ emit_data64(0x0000000000000000, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_vbmi_join_0_1_addr() { - __ align64(); StubId stub_id = StubId::stubgen_join_0_1_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ 
pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2264,14 +2732,23 @@ address StubGenerator::base64_vbmi_join_0_1_addr() { __ emit_data64(0x494a444546404142, relocInfo::none); __ emit_data64(0x565051524c4d4e48, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_vbmi_join_1_2_addr() { - __ align64(); StubId stub_id = StubId::stubgen_join_1_2_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2284,14 +2761,23 @@ address StubGenerator::base64_vbmi_join_1_2_addr() { __ emit_data64(0x5c5d5e58595a5455, relocInfo::none); __ emit_data64(0x696a646566606162, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_vbmi_join_2_3_addr() { - __ align64(); StubId stub_id = StubId::stubgen_join_2_3_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2304,14 +2790,23 @@ address StubGenerator::base64_vbmi_join_2_3_addr() { __ emit_data64(0x767071726c6d6e68, relocInfo::none); __ emit_data64(0x7c7d7e78797a7475, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address 
StubGenerator::base64_AVX2_decode_tables_addr() { - __ align64(); StubId stub_id = StubId::stubgen_avx2_decode_tables_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2339,14 +2834,23 @@ address StubGenerator::base64_AVX2_decode_tables_addr() { // merge multiplier __ emit_data(0x00011000, relocInfo::none, 0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_AVX2_decode_LUT_tables_addr() { - __ align64(); StubId stub_id = StubId::stubgen_avx2_decode_lut_tables_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align64(); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -2380,13 +2884,22 @@ address StubGenerator::base64_AVX2_decode_LUT_tables_addr() { __ emit_data64(0x0804080402011010, relocInfo::none); __ emit_data64(0x1010101010101010, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::base64_decoding_table_addr() { StubId stub_id = StubId::stubgen_decoding_table_base64_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ 
emit_data64(0xffffffffffffffff, relocInfo::none); __ emit_data64(0xffffffffffffffff, relocInfo::none); @@ -2455,6 +2968,9 @@ address StubGenerator::base64_decoding_table_addr() { __ emit_data64(0xffffffffffffffff, relocInfo::none); __ emit_data64(0xffffffffffffffff, relocInfo::none); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2466,10 +2982,16 @@ address StubGenerator::base64_decoding_table_addr() { // Intrinsic function prototype in Base64.java: // private void decodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL, isMIME) { address StubGenerator::generate_base64_decodeBlock() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_base64_decodeBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); @@ -2982,6 +3504,9 @@ address StubGenerator::generate_base64_decodeBlock() { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3000,11 +3525,17 @@ address StubGenerator::generate_base64_decodeBlock() { address StubGenerator::generate_updateBytesCRC32() { assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_updateBytesCRC32_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
@@ -3039,6 +3570,9 @@ address StubGenerator::generate_updateBytesCRC32() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3057,10 +3591,16 @@ address StubGenerator::generate_updateBytesCRC32() { */ address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { assert(UseCRC32CIntrinsics, "need SSE4_2"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_updateBytesCRC32C_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs //Windows RCX RDX R8 R9 none none XMM0..XMM3 @@ -3120,6 +3660,9 @@ address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3138,10 +3681,16 @@ address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { * rsp+40 - z address */ address StubGenerator::generate_multiplyToLen() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_multiplyToLen_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
@@ -3179,6 +3728,9 @@ address StubGenerator::generate_multiplyToLen() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3195,10 +3747,16 @@ address StubGenerator::generate_multiplyToLen() { * rax - int >= mismatched index, < 0 bitwise complement of tail */ address StubGenerator::generate_vectorizedMismatch() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_vectorizedMismatch_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); BLOCK_COMMENT("Entry:"); __ enter(); @@ -3232,6 +3790,9 @@ address StubGenerator::generate_vectorizedMismatch() { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3247,10 +3808,16 @@ address StubGenerator::generate_vectorizedMismatch() { */ address StubGenerator::generate_squareToLen() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_squareToLen_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 
@@ -3279,14 +3846,23 @@ address StubGenerator::generate_squareToLen() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_method_entry_barrier() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_method_entry_barrier_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label deoptimize_label; @@ -3356,6 +3932,9 @@ address StubGenerator::generate_method_entry_barrier() { __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be callers verified_entry_point + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3373,10 +3952,16 @@ address StubGenerator::generate_method_entry_barrier() { * rsp+40 - k */ address StubGenerator::generate_mulAdd() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_mulAdd_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
@@ -3411,14 +3996,23 @@ address StubGenerator::generate_mulAdd() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_bigIntegerRightShift() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8. @@ -3534,6 +4128,9 @@ address StubGenerator::generate_bigIntegerRightShift() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3551,10 +4148,16 @@ address StubGenerator::generate_bigIntegerRightShift() { * rsp40 - numIter */ address StubGenerator::generate_bigIntegerLeftShift() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8. 
@@ -3659,6 +4262,9 @@ address StubGenerator::generate_bigIntegerLeftShift() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3708,9 +4314,15 @@ void StubGenerator::generate_libm_stubs() { */ address StubGenerator::generate_float16ToFloat() { StubId stub_id = StubId::stubgen_hf2f_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); BLOCK_COMMENT("Entry:"); // No need for RuntimeStub frame since it is called only during JIT compilation @@ -3720,6 +4332,9 @@ address StubGenerator::generate_float16ToFloat() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3734,9 +4349,15 @@ address StubGenerator::generate_float16ToFloat() { */ address StubGenerator::generate_floatToFloat16() { StubId stub_id = StubId::stubgen_f2hf_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); BLOCK_COMMENT("Entry:"); // No need for RuntimeStub frame since it is called only during JIT compilation @@ -3746,6 +4367,9 @@ address StubGenerator::generate_floatToFloat16() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3775,8 +4399,14 @@ address StubGenerator::generate_cont_thaw(StubId stub_id) { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return 
start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // TODO: Handle Valhalla return types. May require generating different return barriers. @@ -3889,6 +4519,9 @@ address StubGenerator::generate_cont_thaw(StubId stub_id) { __ ret(0); } + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3909,8 +4542,14 @@ address StubGenerator::generate_cont_returnBarrier_exception() { address StubGenerator::generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; StubId stub_id = StubId::stubgen_cont_preempt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ reset_last_Java_frame(true); @@ -3934,14 +4573,23 @@ address StubGenerator::generate_cont_preempt_stub() { __ movptr(rscratch1, ExternalAddress(ContinuationEntry::thaw_call_pc_address())); __ jmp(rscratch1); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } // exception handler for upcall stubs address StubGenerator::generate_upcall_stub_exception_handler() { StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // native caller has no idea how to handle exceptions // we just crash here. Up to callee to catch exceptions. 
@@ -3953,6 +4601,9 @@ address StubGenerator::generate_upcall_stub_exception_handler() { __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, UpcallLinker::handle_uncaught_exception))); __ should_not_reach_here(); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -3961,8 +4612,14 @@ address StubGenerator::generate_upcall_stub_exception_handler() { // rbx = result address StubGenerator::generate_upcall_stub_load_target() { StubId stub_id = StubId::stubgen_upcall_stub_load_target_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ resolve_global_jobject(j_rarg0, rscratch1); // Load target method from receiver @@ -3976,11 +4633,27 @@ address StubGenerator::generate_upcall_stub_load_target() { __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } void StubGenerator::generate_lookup_secondary_supers_table_stub() { StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id; + GrowableArray
entries; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == Klass::SECONDARY_SUPERS_TABLE_SIZE, "sanity check"); + address start = load_archive_data(stub_id, &entries); + if (start != nullptr) { + assert(entries.length() == Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, + "unexpected extra entry count %d", entries.length()); + StubRoutines::_lookup_secondary_supers_table_stubs[0] = start; + for (int slot = 1; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = entries.at(slot - 1); + } + return; + } StubCodeMark mark(this, stub_id); const Register @@ -3989,21 +4662,35 @@ void StubGenerator::generate_lookup_secondary_supers_table_stub() { result = rdi; for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + address next_entry = __ pc(); + if (slot == 0) { + start = next_entry; + } else { + entries.append(next_entry); + } + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = next_entry; __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, rdx, rcx, rbx, r11, // temps result, slot); __ ret(0); } + + // record the stub entry and end plus all the auxiliary entries + store_archive_data(stub_id, start, __ pc(), &entries); } // Slow path implementation for UseSecondarySupersTable. 
address StubGenerator::generate_lookup_secondary_supers_table_slow_path_stub() { StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - - address start = __ pc(); + start = __ pc(); const Register r_super_klass = rax, @@ -4025,6 +4712,9 @@ address StubGenerator::generate_lookup_secondary_supers_table_slow_path_stub() { __ movl(result, 0); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -4165,7 +4855,7 @@ void StubGenerator::generate_compiler_stubs() { StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_short_shuffle_mask_id, 0x0100010001000100); StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_long_shuffle_mask_id, 0x0000000100000000); StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask(StubId::stubgen_vector_long_sign_mask_id, 0x8000000000000000); - StubRoutines::x86::_vector_iota_indices = generate_iota_indices(); + generate_iota_indices(); StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut(); StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut(); StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long(); @@ -4232,6 +4922,8 @@ void StubGenerator::generate_compiler_stubs() { } if (UseSHA256Intrinsics) { + address entry2 = nullptr; + address entry3 = nullptr; StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256; char* dst = (char*)StubRoutines::x86::_k256_W; char* src = (char*)StubRoutines::x86::_k256; @@ -4240,14 +4932,18 @@ void StubGenerator::generate_compiler_stubs() { memcpy(dst + 32 * ii + 16, src + 16 * ii, 16); } StubRoutines::x86::_k256_W_adr 
= (address)StubRoutines::x86::_k256_W; - StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(); + StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(entry2, entry3); + StubRoutines::x86::_pshuffle_byte_flip_mask_00ba_addr = entry2; + StubRoutines::x86::_pshuffle_byte_flip_mask_dc00_addr = entry3; StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id); StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { + address entry2 = nullptr; StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W; - StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(); + StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(entry2); + StubRoutines::x86::_pshuffle_byte_flip_mask_ymm_lo_addr_sha512 = entry2; StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id); StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id); } @@ -4325,7 +5021,7 @@ void StubGenerator::generate_compiler_stubs() { #endif // COMPILER2_OR_JVMCI } -StubGenerator::StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { +StubGenerator::StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -4348,8 +5044,35 @@ StubGenerator::StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerat }; } -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +#if INCLUDE_CDS +// publish addresses of static data defined in this file and in other +// stubgen stub generator files +void 
StubGenerator::init_AOTAddressTable(GrowableArray<address>
& external_addresses) { + init_AOTAddressTable_adler(external_addresses); + init_AOTAddressTable_aes(external_addresses); + init_AOTAddressTable_cbrt(external_addresses); + init_AOTAddressTable_chacha(external_addresses); + // constants publishes for all of address use by cos and almost all of sin + init_AOTAddressTable_constants(external_addresses); + init_AOTAddressTable_dilithium(external_addresses); + init_AOTAddressTable_exp(external_addresses); + init_AOTAddressTable_fmod(external_addresses); + init_AOTAddressTable_ghash(external_addresses); + init_AOTAddressTable_kyber(external_addresses); + init_AOTAddressTable_log(external_addresses); + init_AOTAddressTable_poly1305(external_addresses); + init_AOTAddressTable_poly_mont(external_addresses); + init_AOTAddressTable_pow(external_addresses); + init_AOTAddressTable_sha3(external_addresses); + init_AOTAddressTable_sin(external_addresses); + init_AOTAddressTable_sinh(external_addresses); + init_AOTAddressTable_tan(external_addresses); + init_AOTAddressTable_tanh(external_addresses); +} +#endif // INCLUDE_CDS + +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) { + StubGenerator g(code, blob_id, stub_data); } #undef __ diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp index 36315535d16..d3823cb559f 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,7 @@ class StubGenerator: public StubCodeGenerator { address generate_count_leading_zeros_lut(); address generate_popcount_avx_lut(); - address generate_iota_indices(); + void generate_iota_indices(); address generate_vector_reverse_bit_lut(); address generate_vector_reverse_byte_perm_mask_long(); @@ -166,12 +166,12 @@ class StubGenerator: public StubCodeGenerator { // - If target supports AVX3 features (BW+VL+F) then implementation uses 32 byte vectors (YMMs) // for both special cases (various small block sizes) and aligned copy loop. This is the // default configuration. - // - If copy length is above AVX3Threshold, then implementation use 64 byte vectors (ZMMs) + // - If copy length is above CopyAVX3Threshold, then implementation use 64 byte vectors (ZMMs) // for main copy loop (and subsequent tail) since bulk of the cycles will be consumed in it. // - If user forces MaxVectorSize=32 then above 4096 bytes its seen that REP MOVs shows a // better performance for disjoint copies. For conjoint/backward copy vector based // copy performs better. - // - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over + // - If user sets CopyAVX3Threshold=0, then special cases for small blocks sizes operate over // 64 byte vector registers (ZMMs). address generate_disjoint_copy_avx3_masked(StubId stub_id, address* entry); @@ -303,11 +303,11 @@ class StubGenerator: public StubCodeGenerator { address generate_sha512_implCompress(StubId stub_id); // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
- address generate_pshuffle_byte_flip_mask_sha512(); + address generate_pshuffle_byte_flip_mask_sha512(address& entry_ymm_lo); address generate_upper_word_mask(); address generate_shuffle_byte_flip_mask(); - address generate_pshuffle_byte_flip_mask(); + address generate_pshuffle_byte_flip_mask(address& entry_00ba, address& entry_dc0); // AES intrinsic stubs @@ -330,6 +330,19 @@ class StubGenerator: public StubCodeGenerator { void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len); + // Shared implementation for ECB/AES Encrypt and Decrypt, which does 4 blocks + // in a loop at a time to hide instruction latency. Set is_encrypt=true for + // encryption, false for decryption. + address generate_electronicCodeBook_AESCrypt_Parallel(bool is_encrypt); + + // A version of ECB/AES Encrypt which does 4 blocks in a loop at a time + // to hide instruction latency + address generate_electronicCodeBook_encryptAESCrypt_Parallel(); + + // A version of ECB/AES Decrypt which does 4 blocks in a loop at a time + // to hide instruction latency + address generate_electronicCodeBook_decryptAESCrypt_Parallel(); + // Vector AES Galois Counter Mode implementation address generate_galoisCounterMode_AESCrypt(); void aesgcm_encrypt(Register in, Register len, Register ct, Register out, Register key, @@ -637,8 +650,33 @@ class StubGenerator: public StubCodeGenerator { void generate_compiler_stubs(); void generate_final_stubs(); +#if INCLUDE_CDS + static void init_AOTAddressTable_adler(GrowableArray
& external_addresses); + static void init_AOTAddressTable_aes(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_cbrt(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_chacha(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_constants(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_dilithium(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_exp(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_fmod(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_ghash(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_kyber(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_log(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_poly1305(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_poly_mont(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_pow(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_sha3(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_sin(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_sinh(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_tan(GrowableArray<address>
& external_addresses); + static void init_AOTAddressTable_tanh(GrowableArray<address>
& external_addresses); +#endif // INCLUDE_CDS + public: - StubGenerator(CodeBuffer* code, BlobId blob_id); + StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data); +#if INCLUDE_CDS + static void init_AOTAddressTable(GrowableArray<address>
& external_addresses); +#endif // INCLUDE_CDS }; #endif // CPU_X86_STUBGENERATOR_X86_64_HPP diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp index 2799997a761..a9424978e0e 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp @@ -67,8 +67,14 @@ address StubGenerator::generate_updateBytesAdler32() { __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_updateBytesAdler32_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // Choose an appropriate LIMIT for inner loop based on the granularity // of intermediate results. For int, LIMIT of 5552 will ensure intermediate @@ -144,7 +150,7 @@ address StubGenerator::generate_updateBytesAdler32() { __ align32(); if (VM_Version::supports_avx512vl()) { // AVX2 performs better for smaller inputs because of leaner post loop reduction sequence.. - __ cmpl(s, MAX2(128, VM_Version::avx3_threshold())); + __ cmpl(s, MAX2(128, CopyAVX3Threshold)); __ jcc(Assembler::belowEqual, SLOOP1A_AVX2); __ lea(end, Address(s, data, Address::times_1, - (2*CHUNKSIZE -1))); @@ -334,7 +340,19 @@ address StubGenerator::generate_updateBytesAdler32() { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_adler(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + ADD(ADLER32_ASCALE_TABLE); + ADD(ADLER32_SHUF0_TABLE); + ADD(ADLER32_SHUF1_TABLE); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp index 24de32a6fe7..b95aa5f8818 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2019, 2025, Intel Corporation. All rights reserved. +* Copyright (c) 2019, 2026, Intel Corporation. All rights reserved. * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -218,7 +218,9 @@ void StubGenerator::generate_aes_stubs() { StubRoutines::_galoisCounterMode_AESCrypt = generate_galoisCounterMode_AESCrypt(); } else { StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); - if (VM_Version::supports_avx2()) { + StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt_Parallel(); + StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt_Parallel(); + if (VM_Version::supports_avx2() && VM_Version::supports_clmul()) { StubRoutines::_galoisCounterMode_AESCrypt = generate_avx2_galoisCounterMode_AESCrypt(); } } @@ -248,10 +250,16 @@ void StubGenerator::generate_aes_stubs() { // Output: // rax - number of processed bytes address StubGenerator::generate_galoisCounterMode_AESCrypt() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_galoisCounterMode_AESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register in = c_rarg0; const Register len = 
c_rarg1; @@ -317,6 +325,9 @@ address StubGenerator::generate_galoisCounterMode_AESCrypt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -335,10 +346,16 @@ address StubGenerator::generate_galoisCounterMode_AESCrypt() { // Output: // rax - number of processed bytes address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_galoisCounterMode_AESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register in = c_rarg0; const Register len = c_rarg1; @@ -402,15 +419,24 @@ address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } // Vector AES Counter implementation address StubGenerator::generate_counterMode_VectorAESCrypt() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -469,6 +495,9 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + 
store_archive_data(stub_id, start, __ pc()); + return start; } @@ -496,10 +525,16 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() { // address StubGenerator::generate_counterMode_AESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -779,15 +814,24 @@ address StubGenerator::generate_counterMode_AESCrypt_Parallel() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() { assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -1055,6 +1099,9 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, 
__ pc()); + return start; } @@ -1067,11 +1114,17 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() { // address StubGenerator::generate_aescrypt_encryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label L_doLast; - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -1150,6 +1203,9 @@ address StubGenerator::generate_aescrypt_encryptBlock() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1162,11 +1218,17 @@ address StubGenerator::generate_aescrypt_encryptBlock() { // address StubGenerator::generate_aescrypt_decryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); Label L_doLast; - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -1246,6 +1308,9 @@ address StubGenerator::generate_aescrypt_decryptBlock() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, 
start, __ pc()); + return start; } @@ -1264,10 +1329,16 @@ address StubGenerator::generate_aescrypt_decryptBlock() { // address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES instructions and misaligned SSE support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; const Register from = c_rarg0; // source array address @@ -1396,9 +1467,213 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() { __ jcc(Assembler::notEqual, L_loopTop_256); __ jmp(L_exit); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } +// This is a version of ECB/AES Encrypt/Decrypt which does 4 blocks in a loop +// at a time to hide instruction latency. +// +// For encryption (is_encrypt=true): +// pxor key[0], aesenc key[1..rounds-1], aesenclast key[rounds] +// For decryption (is_encrypt=false): +// pxor key[1], aesdec key[2..rounds], aesdeclast key[0] +// +// Arguments: +// +// Inputs: +// c_rarg0 - source byte array address +// c_rarg1 - destination byte array address +// c_rarg2 - session key (Ke/Kd) in little endian int array +// c_rarg3 - input length (must be multiple of blocksize 16) +// +// Output: +// rax - input length +// +address StubGenerator::generate_electronicCodeBook_AESCrypt_Parallel(bool is_encrypt) { + assert(UseAES, "need AES instructions and misaligned SSE support"); + StubId stub_id = is_encrypt ? 
StubId::stubgen_electronicCodeBook_encryptAESCrypt_id + : StubId::stubgen_electronicCodeBook_decryptAESCrypt_id; + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); + StubCodeMark mark(this, stub_id); + start = __ pc(); + + const Register from = c_rarg0; // source array address + const Register to = c_rarg1; // destination array address + const Register key = c_rarg2; // key array address + const Register len_reg = c_rarg3; // src len (must be multiple of blocksize 16) + const Register pos = rax; + const Register keylen = r11; + + const XMMRegister xmm_result0 = xmm0; + const XMMRegister xmm_result1 = xmm1; + const XMMRegister xmm_result2 = xmm2; + const XMMRegister xmm_result3 = xmm3; + const XMMRegister xmm_key_shuf_mask = xmm4; + const XMMRegister xmm_key_tmp = xmm5; + // keys 0-9 pre-loaded into xmm6-xmm15 + const int XMM_REG_NUM_KEY_FIRST = 6; + const int XMM_REG_NUM_KEY_LAST = 15; + const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); + + // for key_128, key_192, key_256 + const int ROUNDS[3] = {10, 12, 14}; + + Label L_exit; + Label L_loop4[3], L_single[3], L_done[3]; + +#ifdef DoFour +#undef DoFour +#endif +#ifdef DoOne +#undef DoOne +#endif + +#define DoFour(opc, reg) \ +__ opc(xmm_result0, reg); \ +__ opc(xmm_result1, reg); \ +__ opc(xmm_result2, reg); \ +__ opc(xmm_result3, reg); + +#define DoOne(opc, reg) \ +__ opc(xmm_result0, reg); + + __ enter(); // required for proper stackwalking of RuntimeStub frame + __ push(len_reg); // save original length for return value + + __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); + + __ movdqu(xmm_key_shuf_mask, ExternalAddress(key_shuffle_mask_addr()), r10 /*rscratch*/); + // load up xmm regs 6 thru 15 with keys 0x00 - 0x90 + for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++, offset += 0x10) { + 
load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); + } + __ xorptr(pos, pos); + + // key length could be only {11, 13, 15} * 4 = {44, 52, 60} + __ cmpl(keylen, 52); + __ jcc(Assembler::equal, L_loop4[1]); + __ cmpl(keylen, 60); + __ jcc(Assembler::equal, L_loop4[2]); + + // k == 0: generate code for key_128 + // k == 1: generate code for key_192 + // k == 2: generate code for key_256 + for (int k = 0; k < 3; ++k) { + __ align(OptoLoopAlignment); + __ BIND(L_loop4[k]); + __ cmpptr(len_reg, 4 * AESBlockSize); + __ jcc(Assembler::less, L_single[k]); + + __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); + __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); + __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); + __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); + + if (is_encrypt) { + DoFour(pxor, xmm_key_first); + for (int rnum = 1; rnum < 10; rnum++) { + DoFour(aesenc, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); + } + for (int i = 10; i < ROUNDS[k]; i++) { + load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask); + DoFour(aesenc, xmm_key_tmp); + } + load_key(xmm_key_tmp, key, ROUNDS[k] * 0x10, xmm_key_shuf_mask); + DoFour(aesenclast, xmm_key_tmp); + } else { + DoFour(pxor, as_XMMRegister(1 + XMM_REG_NUM_KEY_FIRST)); + for (int rnum = 2; rnum < 10; rnum++) { + DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); + } + for (int i = 10; i <= ROUNDS[k]; i++) { + load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask); + DoFour(aesdec, xmm_key_tmp); + } + DoFour(aesdeclast, xmm_key_first); + } + + __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); + __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); + __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); + __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); + + __ addptr(pos, 4 * 
AESBlockSize); + __ subptr(len_reg, 4 * AESBlockSize); + __ jmp(L_loop4[k]); + + __ align(OptoLoopAlignment); + __ BIND(L_single[k]); + __ cmpptr(len_reg, AESBlockSize); + __ jcc(Assembler::less, L_done[k]); + + __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0)); + + if (is_encrypt) { + DoOne(pxor, xmm_key_first); + for (int rnum = 1; rnum < 10; rnum++) { + DoOne(aesenc, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); + } + for (int i = 10; i < ROUNDS[k]; i++) { + load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask); + DoOne(aesenc, xmm_key_tmp); + } + load_key(xmm_key_tmp, key, ROUNDS[k] * 0x10, xmm_key_shuf_mask); + DoOne(aesenclast, xmm_key_tmp); + } else { + DoOne(pxor, as_XMMRegister(1 + XMM_REG_NUM_KEY_FIRST)); + for (int rnum = 2; rnum < 10; rnum++) { + DoOne(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); + } + for (int i = 10; i <= ROUNDS[k]; i++) { + load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask); + DoOne(aesdec, xmm_key_tmp); + } + DoOne(aesdeclast, xmm_key_first); + } + + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result0); + __ addptr(pos, AESBlockSize); + __ subptr(len_reg, AESBlockSize); + __ jmp(L_single[k]); + + __ BIND(L_done[k]); + if (k < 2) __ jmp(L_exit); + } //for key_128/192/256 + + __ BIND(L_exit); + // Clear all XMM registers holding sensitive key material before returning + __ pxor(xmm_key_tmp, xmm_key_tmp); + for (int rnum = XMM_REG_NUM_KEY_FIRST; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { + __ pxor(as_XMMRegister(rnum), as_XMMRegister(rnum)); + } + __ pop(rax); + __ leave(); // required for proper stackwalking of RuntimeStub frame + __ ret(0); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + + return start; + +#undef DoFour +#undef DoOne +} + +address StubGenerator::generate_electronicCodeBook_encryptAESCrypt_Parallel() { + return generate_electronicCodeBook_AESCrypt_Parallel(true); +} + +address 
StubGenerator::generate_electronicCodeBook_decryptAESCrypt_Parallel() { + return generate_electronicCodeBook_AESCrypt_Parallel(false); +} + // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time // to hide instruction latency // @@ -1416,10 +1691,16 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() { // address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -1493,7 +1774,7 @@ address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() { __ opc(xmm_result0, src_reg); \ __ opc(xmm_result1, src_reg); \ __ opc(xmm_result2, src_reg); \ -__ opc(xmm_result3, src_reg); \ +__ opc(xmm_result3, src_reg); for (int k = 0; k < 3; ++k) { __ BIND(L_multiBlock_loopTopHead[k]); @@ -1655,14 +1936,23 @@ __ opc(xmm_result3, src_reg); \ __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_electronicCodeBook_encryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark 
mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -1676,14 +1966,23 @@ address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } address StubGenerator::generate_electronicCodeBook_decryptAESCrypt() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_electronicCodeBook_decryptAESCrypt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -1697,6 +1996,9 @@ address StubGenerator::generate_electronicCodeBook_decryptAESCrypt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -4096,3 +4398,27 @@ void StubGenerator::aesgcm_avx2(Register in, Register len, Register ct, Register } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_aes(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + ADD(key_shuffle_mask_addr()); + ADD(counter_shuffle_mask_addr()); + ADD(counter_mask_linc0_addr()); + ADD(counter_mask_linc1_addr()); + ADD(counter_mask_linc1f_addr()); + ADD(counter_mask_linc2_addr()); + ADD(counter_mask_linc2f_addr()); + ADD(counter_mask_linc4_addr()); + ADD(counter_mask_linc8_addr()); + ADD(counter_mask_linc16_addr()); + ADD(counter_mask_linc32_addr()); + ADD(counter_mask_ones_addr()); + ADD(ghash_polynomial_reduction_addr()); + ADD(ghash_polynomial_two_one_addr()); + ADD(counter_mask_addbe_4444_addr()); + ADD(counter_mask_addbe_1234_addr()); + ADD(counter_mask_add_1234_addr()); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp index d53fafafdb4..5530e5325de 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp @@ -511,12 +511,12 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest, // - If target supports AVX3 features (BW+VL+F) then implementation uses 32 byte vectors (YMMs) // for both special cases (various small block sizes) and aligned copy loop. This is the // default configuration. -// - If copy length is above AVX3Threshold, then implementation use 64 byte vectors (ZMMs) +// - If copy length is above CopyAVX3Threshold, then implementation use 64 byte vectors (ZMMs) // for main copy loop (and subsequent tail) since bulk of the cycles will be consumed in it. // - If user forces MaxVectorSize=32 then above 4096 bytes its seen that REP MOVs shows a // better performance for disjoint copies. For conjoint/backward copy vector based // copy performs better. 
-// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over +// - If user sets CopyAVX3Threshold=0, then special cases for small blocks sizes operate over // 64 byte vector registers (ZMMs). // Inputs: @@ -570,13 +570,47 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres default: ShouldNotReachHere(); } + GrowableArray
entries; + GrowableArray
extras; + bool add_handlers = !is_oop && !aligned; + bool add_relocs = UseZGC && is_oop; + bool add_extras = add_handlers || add_relocs; + // The stub employs one unsafe handler region by default but has two + // when MaxVectorSize == 64 So we may expect 0, 3 or 6 extras. + int handlers_count = (MaxVectorSize == 64 ? 2 : 1); + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_extra_count = (add_handlers ? handlers_count : 0) * UnsafeMemoryAccess::COLUMN_COUNT; // 0/1/2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (add_extras ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(!add_handlers || extras.length() == expected_extra_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + if (add_handlers) { + // restore 1/2 x UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, handlers_count); + } +#if INCLUDE_ZGC + // register addresses at which ZGC does colour patching + if (add_relocs) { + register_reloc_addresses(extras, 0, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); - int avx3threshold = VM_Version::avx3_threshold(); - bool use64byteVector = (MaxVectorSize > 32) && (avx3threshold == 0); + bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0); const int large_threshold = 2621440; // 2.5 MB Label L_main_loop, L_main_loop_64bytes, L_tail, L_tail64, L_exit, L_entry; Label L_repmovs, L_main_pre_loop, L_main_pre_loop_64bytes, L_pre_main_post_64; @@ -596,6 +630,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -621,7 +656,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres int threshold[] = { 4096, 2048, 1024, 512}; // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // 'from', 'to' and 'count' are now valid // temp1 holds remaining count and temp4 holds running count used 
to compute @@ -647,7 +682,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres __ cmpq(temp2, large_threshold); __ jcc(Assembler::greaterEqual, L_copy_large); } - if (avx3threshold != 0) { + if (CopyAVX3Threshold != 0) { __ cmpq(count, threshold[shift]); if (MaxVectorSize == 64) { // Copy using 64 byte vectors. @@ -659,7 +694,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres } } - if ((MaxVectorSize < 64) || (avx3threshold != 0)) { + if ((MaxVectorSize < 64) || (CopyAVX3Threshold != 0)) { // Partial copy to make dst address 32 byte aligned. __ movq(temp2, to); __ andq(temp2, 31); @@ -790,10 +825,28 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres if (MaxVectorSize == 64) { __ BIND(L_copy_large); - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, false, ucme_exit_pc); + UnsafeMemoryAccessMark umam(this, add_handlers, false, ucme_exit_pc); arraycopy_avx3_large(to, from, temp1, temp2, temp3, temp4, count, xmm1, xmm2, xmm3, xmm4, shift); __ jmp(L_finish); } + // retrieve the registered handler addresses + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_extra_count, + "unexpected handler addresses count %d", extras.length()); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if (add_relocs) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -908,13 +961,43 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres default: ShouldNotReachHere(); } - + GrowableArray
entries; + GrowableArray
extras; + bool add_handlers = !is_oop && !aligned; + bool add_relocs = UseZGC && is_oop; + bool add_extras = add_handlers || add_relocs; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (add_handlers ? 1 : 0) * UnsafeMemoryAccess::COLUMN_COUNT; // 0/1 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (add_extras ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(!add_handlers || extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + if (add_handlers) { + // restore 1 x UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 1); + } +#if INCLUDE_ZGC + if (add_relocs) { + // register addresses at which ZGC does colour patching + register_reloc_addresses(extras, 0, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); - int avx3threshold = VM_Version::avx3_threshold(); - bool use64byteVector = (MaxVectorSize > 32) && (avx3threshold == 0); + bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0); Label L_main_pre_loop, L_main_pre_loop_64bytes, L_pre_main_post_64; Label L_main_loop, L_main_loop_64bytes, L_tail, L_tail64, L_exit, L_entry; @@ -933,6 +1016,7 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -959,7 +1043,7 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres int threshold[] = { 4096, 2048, 1024, 512}; // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // 'from', 'to' and 'count' are now valid // temp1 holds remaining count. 
@@ -979,12 +1063,12 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres // PRE-MAIN-POST loop for aligned copy. __ BIND(L_entry); - if ((MaxVectorSize > 32) && (avx3threshold != 0)) { + if ((MaxVectorSize > 32) && (CopyAVX3Threshold != 0)) { __ cmpq(temp1, threshold[shift]); __ jcc(Assembler::greaterEqual, L_pre_main_post_64); } - if ((MaxVectorSize < 64) || (avx3threshold != 0)) { + if ((MaxVectorSize < 64) || (CopyAVX3Threshold != 0)) { // Partial copy to make dst address 32 byte aligned. __ leaq(temp2, Address(to, temp1, (Address::ScaleFactor)(shift), 0)); __ andq(temp2, 31); @@ -1073,6 +1157,23 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // retrieve the registered handler addresses + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if (add_relocs) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -1199,7 +1300,7 @@ void StubGenerator::arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegi bool use64byteVector, Label& L_entry, Label& L_exit) { Label L_entry_64, L_entry_96, L_entry_128; Label L_entry_160, L_entry_192; - bool avx3 = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); + bool avx3 = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0); int size_mat[][6] = { /* T_BYTE */ {32 , 64, 96 , 128 , 160 , 192 }, @@ -1387,9 +1488,29 @@ address StubGenerator::generate_disjoint_byte_copy(address* entry) { return 
generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (2 * UnsafeMemoryAccess::COLUMN_COUNT); // 2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + address start = load_archive_data(stub_id, entries_ptr, &extras); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; @@ -1409,6 +1530,7 @@ address StubGenerator::generate_disjoint_byte_copy(address* entry) { if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -1478,6 +1600,17 @@ __ BIND(L_exit); copy_bytes_forward(end_from, end_to, qword_count, rax, r10, L_copy_bytes, L_copy_8_bytes, decorators, T_BYTE); __ jmp(L_copy_4_bytes); } + + // retrieve the registered handler addresses + address end = __ pc(); + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, &extras); + return start; } @@ -1505,9 +1638,29 @@ address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, add return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (2 * UnsafeMemoryAccess::COLUMN_COUNT); // 2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + address start = load_archive_data(stub_id, entries_ptr, &extras); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY; Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; @@ -1522,6 +1675,7 @@ address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, add if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -1588,6 +1742,16 @@ address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, add __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // retrieve the registered handler addresses + address end = __ pc(); + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, &extras); + return start; } @@ -1618,10 +1782,29 @@ address StubGenerator::generate_disjoint_short_copy(address *entry) { return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif - + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (2 * UnsafeMemoryAccess::COLUMN_COUNT); // 2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + address start = load_archive_data(stub_id, entries_ptr, &extras); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; @@ -1640,6 +1823,7 @@ address StubGenerator::generate_disjoint_short_copy(address *entry) { if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -1703,6 +1887,16 @@ __ BIND(L_exit); __ jmp(L_copy_4_bytes); } + // retrieve the registered handler addresses + address end = __ pc(); + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, &extras); + return start; } @@ -1710,7 +1904,6 @@ __ BIND(L_exit); address StubGenerator::generate_fill(StubId stub_id) { BasicType t; bool aligned; - switch (stub_id) { case StubId::stubgen_jbyte_fill_id: t = T_BYTE; @@ -1739,10 +1932,27 @@ address StubGenerator::generate_fill(StubId stub_id) { default: ShouldNotReachHere(); } + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + GrowableArray
extras; + bool add_handlers = ((t == T_BYTE) && !aligned); + int handlers_count = (add_handlers ? 1 : 0); + int expected_extras_count = (handlers_count * UnsafeMemoryAccess::COLUMN_COUNT); // 0/1 x UMAM {start,end,handler} + GrowableArray
* extras_ptr = (add_handlers ? &extras : nullptr); + address start = load_archive_data(stub_id, nullptr, extras_ptr); + if (start != nullptr) { + assert(extras.length() == expected_extras_count, + "unexpected handler addresses count %d", extras.length()); + if (add_handlers) { + // restore 1 x UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 1); + } + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -1755,7 +1965,7 @@ address StubGenerator::generate_fill(StubId stub_id) { { // Add set memory mark to protect against unsafe accesses faulting - UnsafeMemoryAccessMark umam(this, ((t == T_BYTE) && !aligned), true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); __ generate_fill(t, aligned, to, value, r11, rax, xmm0); } @@ -1763,6 +1973,15 @@ address StubGenerator::generate_fill(StubId stub_id) { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_extras_count, + "unexpected handler addresses count %d", extras.length()); + // record the stub entry and end + store_archive_data(stub_id, start, end, nullptr, extras_ptr); + return start; } @@ -1790,10 +2009,29 @@ address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, ad return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif - + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (2 * UnsafeMemoryAccess::COLUMN_COUNT); // 2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + address start = load_archive_data(stub_id, entries_ptr, &extras); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY; Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; @@ -1808,6 +2046,7 @@ address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, ad if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -1866,6 +2105,16 @@ address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, ad __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // retrieve the registered handler addresses + address end = __ pc(); + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, &extras); + return start; } @@ -1918,10 +2167,42 @@ address StubGenerator::generate_disjoint_int_oop_copy(StubId stub_id, address* e return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif + GrowableArray
entries; + GrowableArray
extras; + bool add_handlers = !is_oop && !aligned; + bool add_relocs = UseZGC && is_oop; + bool add_extras = add_handlers || add_relocs; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (add_handlers ? 2 : 0) * UnsafeMemoryAccess::COLUMN_COUNT; // 0/2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (add_extras ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(!add_handlers || extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + if (add_handlers) { + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + } +#if INCLUDE_ZGC + // register addresses at which ZGC does colour patching + if (add_relocs) { + register_reloc_addresses(extras, 0, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; const Register from = rdi; // source array address @@ -1939,6 +2220,7 @@ address StubGenerator::generate_disjoint_int_oop_copy(StubId stub_id, address* e if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -1959,7 +2241,7 @@ address StubGenerator::generate_disjoint_int_oop_copy(StubId stub_id, address* e { // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // 'from', 'to' and 'count' are now valid __ movptr(dword_count, count); __ shrptr(count, 1); // count => qword_count @@ -1971,20 +2253,20 @@ address StubGenerator::generate_disjoint_int_oop_copy(StubId stub_id, address* e __ jmp(L_copy_bytes); // Copy trailing qwords - __ BIND(L_copy_8_bytes); + __ BIND(L_copy_8_bytes); __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); __ movq(Address(end_to, qword_count, Address::times_8, 8), 
rax); __ increment(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); // Check for and copy trailing dword - __ BIND(L_copy_4_bytes); + __ BIND(L_copy_4_bytes); __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 __ jccb(Assembler::zero, L_exit); __ movl(rax, Address(end_from, 8)); __ movl(Address(end_to, 8), rax); } -__ BIND(L_exit); + __ BIND(L_exit); address ucme_exit_pc = __ pc(); bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); restore_arg_regs_using_thread(); @@ -1995,12 +2277,30 @@ __ BIND(L_exit); __ ret(0); { - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, false, ucme_exit_pc); + UnsafeMemoryAccessMark umam(this, add_handlers, false, ucme_exit_pc); // Copy in multi-bytes chunks copy_bytes_forward(end_from, end_to, qword_count, rax, r10, L_copy_bytes, L_copy_8_bytes, decorators, is_oop ? T_OBJECT : T_INT); __ jmp(L_copy_4_bytes); } + // retrieve the registered handler addresses + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if (add_relocs) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -2049,10 +2349,42 @@ address StubGenerator::generate_conjoint_int_oop_copy(StubId stub_id, address no return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif + bool add_handlers = !is_oop && !aligned; + bool add_relocs = UseZGC && is_oop; + bool add_extras = add_handlers || add_relocs; + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (add_handlers ? 2 : 0) * UnsafeMemoryAccess::COLUMN_COUNT; // 0/2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (add_extras ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(!add_handlers || extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + if (add_handlers) { + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + } +#if INCLUDE_ZGC + // register addresses at which ZGC does colour patching + if (add_relocs) { + register_reloc_addresses(extras, 6, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_exit; const Register from = rdi; // source array address @@ -2066,7 +2398,8 @@ address StubGenerator::generate_conjoint_int_oop_copy(StubId stub_id, address no if (entry != nullptr) { *entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + entries.append(*entry); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -2089,7 +2422,7 @@ address StubGenerator::generate_conjoint_int_oop_copy(StubId stub_id, address no assert_clean_int(count, rax); // Make sure 'count' is clean int. 
{ // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // 'from', 'to' and 'count' are now valid __ movptr(dword_count, count); __ shrptr(count, 1); // count => qword_count @@ -2104,7 +2437,7 @@ address StubGenerator::generate_conjoint_int_oop_copy(StubId stub_id, address no __ jmp(L_copy_bytes); // Copy trailing qwords - __ BIND(L_copy_8_bytes); + __ BIND(L_copy_8_bytes); __ movq(rax, Address(from, qword_count, Address::times_8, -8)); __ movq(Address(to, qword_count, Address::times_8, -8), rax); __ decrement(qword_count); @@ -2122,12 +2455,12 @@ address StubGenerator::generate_conjoint_int_oop_copy(StubId stub_id, address no { // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // Copy in multi-bytes chunks copy_bytes_backward(from, to, qword_count, rax, r10, L_copy_bytes, L_copy_8_bytes, decorators, is_oop ? 
T_OBJECT : T_INT); } -__ BIND(L_exit); + __ BIND(L_exit); bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); restore_arg_regs_using_thread(); INC_COUNTER_NP(SharedRuntime::_jint_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free @@ -2136,6 +2469,23 @@ __ BIND(L_exit); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // retrieve the registered handler addresses + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if (add_relocs) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -2182,10 +2532,42 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubId stub_id, address * return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif + bool add_handlers = !is_oop && !aligned; + bool add_relocs = UseZGC && is_oop; + bool add_extras = add_handlers || add_relocs; + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (add_handlers ? 2 : 0) * UnsafeMemoryAccess::COLUMN_COUNT; // 0/2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (add_extras ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(!add_handlers || extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + if (add_handlers) { + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + } +#if INCLUDE_ZGC + // register addresses at which ZGC does colour patching + if (add_relocs) { + register_reloc_addresses(extras, 0, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_exit; const Register from = rdi; // source array address @@ -2203,6 +2585,7 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubId stub_id, address * if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -2223,7 +2606,7 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubId stub_id, address * bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); { // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // Copy from low to high addresses. Use 'to' as scratch. 
__ lea(end_from, Address(from, qword_count, Address::times_8, -8)); @@ -2255,7 +2638,7 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubId stub_id, address * { // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // Copy in multi-bytes chunks copy_bytes_forward(end_from, end_to, qword_count, rax, r10, L_copy_bytes, L_copy_8_bytes, decorators, is_oop ? T_OBJECT : T_LONG); } @@ -2271,6 +2654,23 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubId stub_id, address * __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // retrieve the registered handler addresses + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if (add_relocs) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -2315,10 +2715,42 @@ address StubGenerator::generate_conjoint_long_oop_copy(StubId stub_id, address n return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif + bool add_handlers = !is_oop && !aligned; + bool add_relocs = UseZGC && is_oop; + bool add_extras = add_handlers || add_relocs; + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int expected_handler_count = (add_handlers ? 2 : 0) * UnsafeMemoryAccess::COLUMN_COUNT; // 0/2 x UMAM {start,end,handler} + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (add_extras ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected entry count %d", entries.length()); + assert(!add_handlers || extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } + if (add_handlers) { + // restore 2 UMAM {start,end,handler} addresses from extras + register_unsafe_access_handlers(extras, 0, 2); + } +#if INCLUDE_ZGC + // register addresses at which ZGC does colour patching + if (add_relocs) { + register_reloc_addresses(extras, 0, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_exit; const Register from = rdi; // source array address @@ -2331,6 +2763,7 @@ address StubGenerator::generate_conjoint_long_oop_copy(StubId stub_id, address n if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); } @@ -2352,7 +2785,7 @@ address StubGenerator::generate_conjoint_long_oop_copy(StubId stub_id, address n bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); { // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); __ jmp(L_copy_bytes); @@ -2379,7 +2812,7 @@ address StubGenerator::generate_conjoint_long_oop_copy(StubId stub_id, address n } { // UnsafeMemoryAccess page error: continue after unsafe access - UnsafeMemoryAccessMark umam(this, !is_oop && !aligned, true); + UnsafeMemoryAccessMark umam(this, add_handlers, true); // Copy in multi-bytes chunks copy_bytes_backward(from, to, qword_count, rax, 
r10, L_copy_bytes, L_copy_8_bytes, decorators, is_oop ? T_OBJECT : T_LONG); @@ -2395,6 +2828,24 @@ address StubGenerator::generate_conjoint_long_oop_copy(StubId stub_id, address n __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + + // retrieve the registered handler addresses + address end = __ pc(); + if (add_handlers) { + retrieve_unsafe_access_handlers(start, end, extras); + } + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if ((UseZGC && is_oop)) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -2450,6 +2901,28 @@ address StubGenerator::generate_checkcast_copy(StubId stub_id, address *entry) { ShouldNotReachHere(); } + GrowableArray
entries; + GrowableArray
extras; + int expected_entry_count = (entry != nullptr ? 2 : 1); + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == expected_entry_count, "sanity check"); + GrowableArray
* entries_ptr = (entry_count == 1 ? nullptr : &entries); + GrowableArray
* extras_ptr = (UseZGC ? &extras : nullptr); + address start = load_archive_data(stub_id, entries_ptr, extras_ptr); + if (start != nullptr) { + assert(entries.length() == expected_entry_count - 1, + "unexpected addresses count %d", entries.length()); + if (entry != nullptr) { + *entry = entries.at(0); + } +#if INCLUDE_ZGC + if (UseZGC) { + register_reloc_addresses(extras, 0, extras.length()); + } +#endif // INCLUDE_ZGC + return start; + } + Label L_load_element, L_store_element, L_do_card_marks, L_done; // Input registers (after setup_arg_regs) @@ -2479,7 +2952,7 @@ address StubGenerator::generate_checkcast_copy(StubId stub_id, address *entry) { __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2504,6 +2977,7 @@ address StubGenerator::generate_checkcast_copy(StubId stub_id, address *entry) { // Caller of this entry point must set up the argument registers. 
if (entry != nullptr) { *entry = __ pc(); + entries.append(*entry); BLOCK_COMMENT("Entry:"); } @@ -2638,6 +3112,16 @@ address StubGenerator::generate_checkcast_copy(StubId stub_id, address *entry) { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + address end = __ pc(); +#if INCLUDE_ZGC + // retrieve addresses at which ZGC does colour patching + if (UseZGC) { + retrieve_reloc_addresses(start, end, extras); + } +#endif // INCLUDE_ZGC + // record the stub entry and end plus the no_push entry + store_archive_data(stub_id, start, end, entries_ptr, extras_ptr); + return start; } @@ -2657,6 +3141,14 @@ address StubGenerator::generate_checkcast_copy(StubId stub_id, address *entry) { address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry) { + StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + Label L_long_aligned, L_int_aligned, L_short_aligned; // Input registers (before setup_arg_regs) @@ -2668,9 +3160,8 @@ address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address sho const Register bits = rax; // test copy of low bits __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2702,6 +3193,9 @@ address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address sho __ shrptr(size, LogBytesPerLong); // size => qword_count __ jump(RuntimeAddress(long_copy_entry)); + // record the stub entry and end plus + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -2803,10 +3297,23 @@ static void do_setmemory_atomic_loop(USM_TYPE type, Register dest, // 
to an int, short, or byte fill loop. // address StubGenerator::generate_unsafe_setmemory(address unsafe_byte_fill) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_unsafe_setmemory_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + // we expect three set of extra unsafememory access handler entries + GrowableArray
extras; + int expected_handler_count = 3 * UnsafeMemoryAccess::COLUMN_COUNT; + address start = load_archive_data(stub_id, nullptr, &extras); + if (start != nullptr) { + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + register_unsafe_access_handlers(extras, 0, 3); + return start; + } + + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame assert(unsafe_byte_fill != nullptr, "Invalid call"); @@ -2896,6 +3403,16 @@ address StubGenerator::generate_unsafe_setmemory(address unsafe_byte_fill) { __ jump(RuntimeAddress(unsafe_byte_fill)); } + // retrieve the registered handler addresses + address end = __ pc(); + retrieve_unsafe_access_handlers(start, end, extras); + assert(extras.length() == expected_handler_count, + "unexpected handler addresses count %d", extras.length()); + + // record the stub entry and end plus the no_push entry and any + // extra handler addresses + store_archive_data(stub_id, start, end, nullptr, &extras); + return start; } @@ -2952,7 +3469,15 @@ address StubGenerator::generate_generic_copy(address byte_copy_entry, address sh address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry) { - Label L_failed, L_failed_0, L_objArray; + StubId stub_id = StubId::stubgen_generic_arraycopy_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + + Label L_failed, L_failed_0, L_skip_failed_0, L_objArray; Label L_copy_shorts, L_copy_ints, L_copy_longs; // Input registers @@ -2968,22 +3493,9 @@ address StubGenerator::generate_generic_copy(address byte_copy_entry, address sh const Register rklass_tmp = rdi; // load_klass #endif - { int modulus = CodeEntryAlignment; - int target = modulus - 5; // 5 
= sizeof jmp(L_failed) - int advance = target - (__ offset() % modulus); - if (advance < 0) advance += modulus; - if (advance > 0) __ nop(advance); - } - StubId stub_id = StubId::stubgen_generic_arraycopy_id; StubCodeMark mark(this, stub_id); - - // Short-hop target to L_failed. Makes for denser prologue code. - __ BIND(L_failed_0); - __ jmp(L_failed); - assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); - __ align(CodeEntryAlignment); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -3024,7 +3536,8 @@ address StubGenerator::generate_generic_copy(address byte_copy_entry, address sh // if (dst_pos < 0) return -1; __ testl(dst_pos, dst_pos); // dst_pos (32-bits) size_t j4off = __ offset(); - __ jccb(Assembler::negative, L_failed_0); + // skip over the failure trampoline + __ jccb(Assembler::positive, L_skip_failed_0); // The first four tests are very dense code, // but not quite dense enough to put four @@ -3034,6 +3547,13 @@ address StubGenerator::generate_generic_copy(address byte_copy_entry, address sh // Make sure of this. guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); + // Short-hop target to L_failed. Makes for denser prologue code. 
+ __ BIND(L_failed_0); + __ jmp(L_failed); + + // continue here if first 4 checks pass + __ bind(L_skip_failed_0); + // registers used as temp const Register r11_length = r11; // elements count to copy const Register r10_src_klass = r10; // array klass @@ -3256,6 +3776,9 @@ __ BIND(L_failed); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp index 73330dedc0f..4c647b7d9dc 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp @@ -191,8 +191,14 @@ ATTRIBUTE_ALIGNED(4) static const juint _D_table[] = address StubGenerator::generate_libmCbrt() { StubId stub_id = StubId::stubgen_dcbrt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1; Label B1_1, B1_2, B1_4; @@ -335,7 +341,34 @@ address StubGenerator::generate_libmCbrt() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_cbrt(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + ADD(_ABS_MASK); + ADD(_SIG_MASK); + ADD(_EXP_MASK); + ADD(_EXP_MSK2); + ADD(_EXP_MSK3); + ADD(_SCALE63); + ADD(_ZERON); + ADD(_INF); + ADD(_NEG_INF); + address coeff_table = (address)_coeff_table; + ADD(coeff_table); + ADD(coeff_table + 16); + ADD(coeff_table + 32); + ADD(coeff_table + 48); + ADD(_rcp_table); + ADD(_cbrt_table); + ADD(_D_table); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp index 7afaf34e031..1fa51cd2f18 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp @@ -111,10 +111,16 @@ void StubGenerator::generate_chacha_stubs() { /* The 2-block AVX/AVX2-enabled ChaCha20 block function implementation */ address StubGenerator::generate_chacha20Block_avx() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_chacha20Block_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_twoRounds; const Register state = c_rarg0; @@ -295,15 +301,25 @@ address StubGenerator::generate_chacha20Block_avx() { } __ leave(); __ ret(0); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } /* The 4-block AVX512-enabled ChaCha20 block function implementation */ address StubGenerator::generate_chacha20Block_avx512() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_chacha20Block_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ 
align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_twoRounds; const Register state = c_rarg0; @@ -466,6 +482,10 @@ address StubGenerator::generate_chacha20Block_avx512() { __ vzeroupper(); __ leave(); __ ret(0); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -584,3 +604,13 @@ bVec, } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_chacha(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + ADD(CC20_COUNTER_ADD_AVX); + ADD(CC20_COUNTER_ADD_AVX512); + ADD(CC20_LROT_CONSTS); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_constants.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_constants.cpp index 93fa7e650db..19e1ca680b3 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_constants.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_constants.cpp @@ -233,3 +233,30 @@ ATTRIBUTE_ALIGNED(16) static const juint _Ctable[] = { }; address StubGenerator::Ctable = (address)_Ctable; +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_constants(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + ADD(_ONE); + ADD(_ONEHALF); + ADD(_SIGN_MASK); + ADD(_TWO_POW_55); + ADD(_TWO_POW_M55); + ADD(_SHIFTER); + ADD(_ZERO); + ADD(_SC_1); + ADD(_SC_2); + ADD(_SC_3); + ADD(_SC_4); + // Use value which was already cast to (address): StubGenerator::PI_4; + ADD(PI_4); + ADD(PI_4 + 8); + ADD(_PI32INV); + ADD(_NEG_ZERO); + ADD(_P_1); + ADD(_P_2); + ADD(_P_3); + ADD(_PI_INV_TABLE); + ADD(_Ctable); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp index 8cb6ead21fd..8dedd50cd97 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp @@ -174,8 +174,14 @@ address StubGenerator::generate_libmCos() { StubId stub_id = StubId::stubgen_dcos_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1, L_2TAG_PACKET_7_0_1; @@ -619,6 +625,9 @@ address StubGenerator::generate_libmCos() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp index b9590939468..de8f52a3c05 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp @@ -401,10 +401,16 @@ static void storeXmms(Register destination, int offset, const XMMRegister xmmReg // static address 
generate_dilithiumAlmostNtt_avx(StubGenerator *stubgen, int vector_len, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumAlmostNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -646,6 +652,9 @@ static address generate_dilithiumAlmostNtt_avx(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -657,10 +666,16 @@ static address generate_dilithiumAlmostNtt_avx(StubGenerator *stubgen, // zetas (int[128*8]) = c_rarg1 static address generate_dilithiumAlmostInverseNtt_avx(StubGenerator *stubgen, int vector_len, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumAlmostInverseNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -886,6 +901,9 @@ static address generate_dilithiumAlmostInverseNtt_avx(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -900,10 +918,16 @@ static address generate_dilithiumAlmostInverseNtt_avx(StubGenerator *stubgen, static address generate_dilithiumNttMult_avx(StubGenerator *stubgen, int vector_len, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = 
StubId::stubgen_dilithiumNttMult_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); Label L_loop; @@ -972,6 +996,9 @@ static address generate_dilithiumNttMult_avx(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -984,10 +1011,16 @@ static address generate_dilithiumNttMult_avx(StubGenerator *stubgen, static address generate_dilithiumMontMulByConstant_avx(StubGenerator *stubgen, int vector_len, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumMontMulByConstant_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); Label L_loop; @@ -1059,6 +1092,9 @@ static address generate_dilithiumMontMulByConstant_avx(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1073,10 +1109,16 @@ static address generate_dilithiumMontMulByConstant_avx(StubGenerator *stubgen, // multiplier (int) = c_rarg4 static address generate_dilithiumDecomposePoly_avx(StubGenerator *stubgen, int vector_len, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_dilithiumDecomposePoly_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = 
stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); Label L_loop; @@ -1318,6 +1360,9 @@ static address generate_dilithiumDecomposePoly_avx(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1340,3 +1385,21 @@ void StubGenerator::generate_dilithium_stubs() { generate_dilithiumDecomposePoly_avx(this, vector_len, _masm); } } + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_dilithium(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + // use accessors to correctly identify the relevant addresses + ADD(unshufflePermsAddr(0)); + ADD(unshufflePermsAddr(1)); + ADD(unshufflePermsAddr(2)); + ADD(unshufflePermsAddr(3)); + ADD(unshufflePermsAddr(4)); + ADD(unshufflePermsAddr(5)); + ADD(dilithiumAvx512ConstsAddr(montQInvModRIdx)); + ADD(dilithiumAvx512ConstsAddr(dilithium_qIdx)); + ADD(dilithiumAvx512ConstsAddr(montRSquareModQIdx)); + ADD(dilithiumAvx512ConstsAddr(barrettAddendIdx)); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp index 5130fd2c9d2..3c8babcbecf 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp @@ -166,8 +166,14 @@ ATTRIBUTE_ALIGNED(4) static const juint _INF[] = address StubGenerator::generate_libmExp() { StubId stub_id = StubId::stubgen_dexp_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2; @@ -381,7 +387,32 @@ address StubGenerator::generate_libmExp() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_exp(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + address cv = (address)_cv; + ADD(cv); + ADD(cv + 16); + ADD(cv + 32); + ADD(cv + 48); + ADD(cv + 64); + ADD(cv + 80); + ADD(_mmask); + ADD(_bias); + ADD(_Tbl_addr); + ADD(_ALLONES); + ADD(_ebias); + ADD(_XMAX); + ADD(_XMIN); + ADD(_INF); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp index b1eaa4b8031..f53985a13b7 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp @@ -72,13 +72,19 @@ ATTRIBUTE_ALIGNED(32) static const uint64_t CONST_e307[] = { }; address StubGenerator::generate_libmFmod() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_fmod_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame - if (VM_Version::supports_avx512vlbwdq()) { // AVX512 version + if (VM_Version::supports_avx512vlbwdq() && VM_Version::supports_fma()) { // AVX512 version // Source used to generate the AVX512 fmod assembly below: // @@ -521,7 +527,22 @@ address StubGenerator::generate_libmFmod() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_fmod(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(CONST_NaN); + ADD(CONST_1p260); + ADD(CONST_MAX); + ADD(CONST_INF); + ADD(CONST_e307); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp index 6f05b1ab5e6..9ebab07589e 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp @@ -80,11 +80,17 @@ void StubGenerator::generate_ghash_stubs() { // Single and multi-block ghash operations. address StubGenerator::generate_ghash_processBlocks() { - __ align(CodeEntryAlignment); - Label L_ghash_loop, L_exit; StubId stub_id = StubId::stubgen_ghash_processBlocks_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + Label L_ghash_loop, L_exit; + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); const Register state = c_rarg0; const Register subkeyH = c_rarg1; @@ -211,17 +217,25 @@ address StubGenerator::generate_ghash_processBlocks() { __ leave(); __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } // Ghash single and multi block operations using AVX instructions address StubGenerator::generate_avx_ghash_processBlocks() { - __ align(CodeEntryAlignment); - StubId stub_id = StubId::stubgen_ghash_processBlocks_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); // arguments const Register state = c_rarg0; @@ -237,6 +251,9 @@ address 
StubGenerator::generate_avx_ghash_processBlocks() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -538,3 +555,14 @@ void StubGenerator::generateHtbl_eight_blocks(Register htbl) { } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_ghash(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(GHASH_SHUFFLE_MASK); + ADD(GHASH_LONG_SWAP_MASK); + ADD(GHASH_BYTE_SWAP_MASK); + ADD(GHASH_POLYNOMIAL); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp index 7d5dee6a5df..347a9b936a8 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp @@ -400,10 +400,16 @@ static int xmm29_29[] = {29, 29, 29, 29}; // ntt_zetas (short[256]) = c_rarg1 address generate_kyberNtt_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -487,6 +493,9 @@ address generate_kyberNtt_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -496,11 +505,16 @@ address generate_kyberNtt_avx512(StubGenerator *stubgen, // ntt_zetas (short[256]) = c_rarg1 address generate_kyberInverseNtt_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberInverseNtt_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ 
-610,6 +624,9 @@ address generate_kyberInverseNtt_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -621,11 +638,16 @@ address generate_kyberInverseNtt_avx512(StubGenerator *stubgen, // zetas (short[128]) = c_rarg3 address generate_kyberNttMult_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberNttMult_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register result = c_rarg0; @@ -731,6 +753,9 @@ address generate_kyberNttMult_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -741,11 +766,16 @@ address generate_kyberNttMult_avx512(StubGenerator *stubgen, // b (short[256]) = c_rarg2 address generate_kyberAddPoly_2_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberAddPoly_2_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register result = c_rarg0; @@ -776,6 +806,9 @@ address generate_kyberAddPoly_2_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -787,11 
+820,16 @@ address generate_kyberAddPoly_2_avx512(StubGenerator *stubgen, // c (short[256]) = c_rarg3 address generate_kyberAddPoly_3_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyberAddPoly_3_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register result = c_rarg0; @@ -830,6 +868,9 @@ address generate_kyberAddPoly_3_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -841,11 +882,16 @@ address generate_kyberAddPoly_3_avx512(StubGenerator *stubgen, // parsedLength (int) = c_rarg3 address generate_kyber12To16_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_kyber12To16_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register condensed = c_rarg0; @@ -984,6 +1030,9 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -993,11 +1042,16 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen, // coeffs (short[256]) = c_rarg0 address generate_kyberBarrettReduce_avx512(StubGenerator *stubgen, MacroAssembler *_masm) { - - __ align(CodeEntryAlignment); StubId stub_id 
= StubId::stubgen_kyberBarrettReduce_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); const Register coeffs = c_rarg0; @@ -1021,6 +1075,9 @@ address generate_kyberBarrettReduce_avx512(StubGenerator *stubgen, __ mov64(rax, 0); // return 0 __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1038,3 +1095,24 @@ void StubGenerator::generate_kyber_stubs() { } } } + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_kyber(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)) + // use accessors to correctly identify the relevant addresses + ADD(kyberAvx512NttPermsAddr()); + ADD(kyberAvx512InverseNttPermsAddr()); + ADD(kyberAvx512_nttMultPermsAddr()); + ADD(kyberAvx512_12To16PermsAddr()); + ADD(kyberAvx512_12To16DupAddr()); + ADD(kyberAvx512_12To16ShiftAddr()); + ADD(kyberAvx512_12To16AndAddr()); + ADD(kyberAvx512ConstsAddr(qOffset)); + ADD(kyberAvx512ConstsAddr(qInvModROffset)); + ADD(kyberAvx512ConstsAddr(dimHalfInverseOffset)); + ADD(kyberAvx512ConstsAddr(barretMultiplierOffset)); + ADD(kyberAvx512ConstsAddr(montRSquareModqOffset)); + ADD(kyberAvx512ConstsAddr(f00Offset)); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp index 6b5b4d704e3..07683a51e3d 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp @@ -177,8 +177,14 @@ ATTRIBUTE_ALIGNED(16) static const juint _coeff[] = address StubGenerator::generate_libmLog() { StubId stub_id = StubId::stubgen_dlog_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2; @@ -359,6 +365,9 @@ address StubGenerator::generate_libmLog() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -516,8 +525,14 @@ ATTRIBUTE_ALIGNED(16) static const juint _coeff_log10[] = address StubGenerator::generate_libmLog10() { StubId stub_id = 
StubId::stubgen_dlog10_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2; @@ -704,7 +719,38 @@ address StubGenerator::generate_libmLog10() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_log(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + address log2 = (address)_log2; + address coeff = (address)_coeff; + address LOG10_E = (address)_LOG10_E; + address log2_log10 = (address)_log2_log10; + address coeff_log10 = (address)_coeff_log10; + + ADD(_L_tbl); + ADD(log2); + ADD(log2 + 8); + ADD(coeff); + ADD(coeff + 16); + ADD(coeff + 32); + ADD(_HIGHSIGMASK_log10); + ADD(LOG10_E); + ADD(LOG10_E + 8); + ADD(_L_tbl_log10); + ADD(log2_log10); + ADD(log2_log10 + 8); + ADD(coeff_log10); + ADD(coeff_log10 + 16); + ADD(coeff_log10 + 32); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp index c80b2d16181..ea7e6d64254 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp @@ -909,10 +909,16 @@ void StubGenerator::poly1305_process_blocks_avx512( // After execution, input and length will point at remaining (unprocessed) data // and accumulator will point to the current accumulator value address StubGenerator::generate_poly1305_processBlocks() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_poly1305_processBlocks_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // Save all 'SOE' registers @@ -1028,6 +1034,10 @@ address StubGenerator::generate_poly1305_processBlocks() { __ leave(); __ ret(0); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -1695,3 +1705,14 @@ void StubGenerator::poly1305_msg_mul_reduce_vec4_avx2( __ vpaddq(A1, A1, YTMP2, Assembler::AVX_256bit); //Add medium 42-bit bits from new blocks to accumulator 
__ vpaddq(A1, A1, YTMP5, Assembler::AVX_256bit); } +#undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_poly1305(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(POLY1305_PAD_MSG); + ADD(POLY1305_MASK42); + ADD(POLY1305_MASK44); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp index c439e0b370f..308a8042993 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp @@ -558,10 +558,16 @@ void montgomeryMultiplyAVX2(const Register aLimbs, const Register bLimbs, const } address StubGenerator::generate_intpoly_montgomeryMult_P256() { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_intpoly_montgomeryMult_P256_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); if (VM_Version::supports_avx512ifma() && VM_Version::supports_avx512vlbw()) { @@ -620,6 +626,10 @@ address StubGenerator::generate_intpoly_montgomeryMult_P256() { __ leave(); __ ret(0); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } @@ -680,10 +690,16 @@ address StubGenerator::generate_intpoly_assign() { // P521OrderField: 19 = 8 + 8 + 2 + 1 // Special Cases 5, 10, 14, 16, 19 - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_intpoly_assign_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); __ enter(); // Inputs @@ -762,5 +778,24 @@ address StubGenerator::generate_intpoly_assign() { __ bind(L_Done); __ 
leave(); __ ret(0); + + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } +#undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_poly_mont(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + // use accessors to retrieve all correct addresses + ADD(shift_1L()); + ADD(shift_1R()); + ADD(p256_mask52()); + ADD(mask_limb5()); + ADD(modulus_p256()); + ADD(modulus_p256(1)); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp index 3c3df7e6ac4..a9a6dc10da4 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp @@ -760,8 +760,14 @@ ATTRIBUTE_ALIGNED(8) static const juint _DOUBLE0DOT5[] = { address StubGenerator::generate_libmPow() { StubId stub_id = StubId::stubgen_dpow_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2; @@ -1859,7 +1865,45 @@ address StubGenerator::generate_libmPow() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_pow(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + address HIGHMASK_Y = (address)_HIGHMASK_Y; + address e_coeff = (address)_e_coeff; + address coeff_h = (address)_coeff_h; + address coeff_pow = (address)_coeff_pow; + + ADD(_HIGHSIGMASK); + ADD(_LOG2_E); + ADD(HIGHMASK_Y); + ADD(HIGHMASK_Y + 8); + ADD(_T_exp); + ADD(e_coeff); + ADD(e_coeff + 16); + ADD(e_coeff + 32); + ADD(coeff_h); + ADD(coeff_h + 8); + ADD(_HIGHMASK_LOG_X); + ADD(_HALFMASK); + ADD(coeff_pow); + ADD(coeff_pow + 16); + ADD(coeff_pow + 32); + ADD(coeff_pow + 48); + ADD(coeff_pow + 64); + ADD(coeff_pow + 80); + ADD(_L_tbl_pow); + ADD(_log2_pow); + ADD(_DOUBLE2); + ADD(_DOUBLE0); + ADD(_DOUBLE0DOT5); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp index f9d876f34f3..58f81652a0c 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp @@ -104,10 +104,15 @@ static address generate_sha3_implCompress(StubId stub_id, default: ShouldNotReachHere(); } - + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); const Register buf = c_rarg0; const Register state = c_rarg1; @@ -316,6 +321,9 @@ static address generate_sha3_implCompress(StubId stub_id, __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -326,10 +334,16 @@ static address generate_sha3_implCompress(StubId stub_id, // Performs two keccak() computations in parallel. The steps of the // two computations are executed interleaved. 
static address generate_double_keccak(StubGenerator *stubgen, MacroAssembler *_masm) { - __ align(CodeEntryAlignment); StubId stub_id = StubId::stubgen_double_keccak_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = stubgen->load_archive_data(stub_id); + if (start != nullptr) { + return start; + } + __ align(CodeEntryAlignment); StubCodeMark mark(stubgen, stub_id); - address start = __ pc(); + start = __ pc(); const Register state0 = c_rarg0; const Register state1 = c_rarg1; @@ -495,6 +509,9 @@ static address generate_double_keccak(StubGenerator *stubgen, MacroAssembler *_m __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + stubgen->store_archive_data(stub_id, start, __ pc()); + return start; } @@ -508,3 +525,14 @@ void StubGenerator::generate_sha3_stubs() { generate_sha3_implCompress(StubId::stubgen_sha3_implCompressMB_id, this, _masm); } } + +#undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_sha3(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(round_constsAddr()); + ADD(permsAndRotsAddr()); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp index 5290e737581..00c759a369b 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp @@ -181,8 +181,14 @@ ATTRIBUTE_ALIGNED(8) static const juint _ALL_ONES[] = address StubGenerator::generate_libmSin() { StubId stub_id = StubId::stubgen_dsin_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1, L_2TAG_PACKET_7_0_1; @@ -645,7 +651,18 @@ address StubGenerator::generate_libmSin() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_sin(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(_ALL_ONES); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sinh.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sinh.cpp index 86e4ac20176..9969866cfc7 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sinh.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sinh.cpp @@ -290,8 +290,14 @@ ATTRIBUTE_ALIGNED(16) static const juint _T2_neg_f[] = address StubGenerator::generate_libmSinh() { StubId stub_id = StubId::stubgen_dsinh_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_3_0_2, L_2TAG_PACKET_4_0_2; Label L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2; @@ -519,7 +525,36 @@ address StubGenerator::generate_libmSinh() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_sinh(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + address L2E = (address)_L2E; + address cv = (address)_cv; + address pv = (address)_pv; + + ADD(L2E); + ADD(L2E + 8); + ADD(_HALFMASK); + ADD(_Shifter); + ADD(cv); + ADD(cv + 16); + ADD(cv + 32); + ADD(cv + 48); + ADD(cv + 64); + ADD(_T2f); + ADD(_T2_neg_f); + ADD(pv); + ADD(pv + 16); + ADD(pv + 32); + ADD(_MASK3); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp index 4f14414652c..9f91b9e8f84 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp @@ -456,8 +456,14 @@ ATTRIBUTE_ALIGNED(8) static const juint _QQ_2_tan[] = address StubGenerator::generate_libmTan() { StubId stub_id = StubId::stubgen_dtan_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1, L_2TAG_PACKET_7_0_1; @@ -1025,7 +1031,35 @@ address StubGenerator::generate_libmTan() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_tan(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + address PI_4_tan = (address)_PI_4_tan; + + ADD(_MUL16); + ADD(_sign_mask_tan); + ADD(_PI32INV_tan); + ADD(_P_1_tan); + ADD(_P_2_tan); + ADD(_P_3_tan); + ADD(_Ctable_tan); + ADD(_MASK_35_tan); + ADD(_Q_11_tan); + ADD(_Q_9_tan); + ADD(_Q_7_tan); + ADD(_Q_5_tan); + ADD(_Q_3_tan); + ADD(PI_4_tan); + ADD(PI_4_tan + 8); + ADD(_QQ_2_tan); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp index dce4fbfc455..4f2fe8a460b 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp @@ -303,8 +303,14 @@ ATTRIBUTE_ALIGNED(16) static const juint _T2_neg_f[] = address StubGenerator::generate_libmTanh() { StubId stub_id = StubId::stubgen_dtanh_id; + int entry_count = StubInfo::entry_count(stub_id); + assert(entry_count == 1, "sanity check"); + address start = load_archive_data(stub_id); + if (start != nullptr) { + return start; + } StubCodeMark mark(this, stub_id); - address start = __ pc(); + start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1; @@ -495,7 +501,36 @@ address StubGenerator::generate_libmTanh() { __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); + // record the stub entry and end + store_archive_data(stub_id, start, __ pc()); + return start; } #undef __ + +#if INCLUDE_CDS +void StubGenerator::init_AOTAddressTable_tanh(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + address L2E = (address)_L2E; + address cv = (address)_cv; + address pv = (address)_pv; + + ADD(L2E); + ADD(L2E + 8); + ADD(_HALFMASK); + ADD(_ONEMASK); + ADD(_TWOMASK); + ADD(_Shifter); + ADD(cv); + ADD(cv + 16); + ADD(cv + 32); + ADD(_T2_neg_f); + ADD(pv); + ADD(pv + 16); + ADD(pv + 32); + ADD(_MASK3); + ADD(_RMASK); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp index ee9cea08e64..ce11925dde2 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp @@ -28,6 +28,10 @@ #include "runtime/stubRoutines.hpp" #include "utilities/globalDefinitions.hpp" #include "crc32c.h" +#include "stubGenerator_x86_64.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#endif // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. @@ -40,8 +44,12 @@ #define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); -STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) +#define DEFINE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) [count]; +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY) + +#undef DEFINE_ARCH_ENTRY_ARRAY #undef DEFINE_ARCH_ENTRY_INIT #undef DEFINE_ARCH_ENTRY @@ -411,3 +419,46 @@ ATTRIBUTE_ALIGNED(64) const julong StubRoutines::x86::_k512_W[] = 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL, }; + +#if INCLUDE_CDS + +void StubRoutines::init_AOTAddressTable() { + ResourceMark rm; + GrowableArray
external_addresses; + // publish static addresses referred to by main x86 generator and + // auxiliary x86 generators + StubGenerator::init_AOTAddressTable(external_addresses); + // publish external data addresses defined in nested x86 class + StubRoutines::x86::init_AOTAddressTable(external_addresses); +#ifdef COMPILER1 + LIR_Assembler::init_AOTAddressTable(external_addresses); +#endif + AOTCodeCache::publish_external_addresses(external_addresses); +} + +// publish addresses of external data defined in this file which may +// be referenced from stub or code +void StubRoutines::x86::init_AOTAddressTable(GrowableArray
& external_addresses) { +#define ADD(addr) external_addresses.append((address)(addr)); + ADD(&_mxcsr_std); + ADD(&_mxcsr_rz); + ADD(crc_by128_masks_addr()); + ADD(crc_by128_masks_addr() + 16); + ADD(crc_by128_masks_addr() + 32); + // this is added in generic code + // ADD(_crc_table); + ADD(crc_by128_masks_avx512_addr()); + ADD(crc_by128_masks_avx512_addr() + 16); + ADD(crc_by128_masks_avx512_addr() + 32); + ADD(_crc_table_avx512); + ADD(_crc32c_table_avx512); + ADD(_shuf_table_crc32_avx512); + // n.b. call accessor for this one to ensure the table is generated + ADD(crc32c_table_addr()); + ADD(_arrays_hashcode_powers_of_31); + ADD(_k256); + ADD(_k256_W); + ADD(_k512_W); +#undef ADD +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp index 3654b644131..7283798888b 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp @@ -55,9 +55,13 @@ class x86 { #define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) -private: - STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) +#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static address STUB_FIELD_NAME(field_name) [count] ; +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY) + +#undef DECLARE_ARCH_ENTRY_ARRAY #undef DECLARE_ARCH_ENTRY_INIT #undef DECLARE_ARCH_ENTRY @@ -70,9 +74,13 @@ private: #define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) -public: - STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) +#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \ + static 
address getter_name(int idx) { return STUB_FIELD_NAME(field_name) [idx]; } +public: + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY) + +#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY #undef DEFINE_ARCH_ENTRY_GETTER_INIT #undef DEFINE_ARCH_GETTER_ENTRY @@ -112,6 +120,8 @@ public: static address arrays_hashcode_powers_of_31() { return (address)_arrays_hashcode_powers_of_31; } static void generate_CRC32C_table(bool is_pclmulqdq_supported); + + static void init_AOTAddressTable(GrowableArray
& external_addresses); }; #endif // CPU_X86_STUBROUTINES_X86_HPP diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp index 78d6dec08cf..cf9de40a237 100644 --- a/src/hotspot/cpu/x86/vm_version_x86.cpp +++ b/src/hotspot/cpu/x86/vm_version_x86.cpp @@ -958,9 +958,17 @@ void VM_Version::get_processor_features() { if (UseSSE < 1) _features.clear_feature(CPU_SSE); - //since AVX instructions is slower than SSE in some ZX cpus, force USEAVX=0. - if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) { - UseAVX = 0; + // ZX cpus specific settings + if (is_zx() && FLAG_IS_DEFAULT(UseAVX)) { + if (cpu_family() == 7) { + if (extended_cpu_model() == 0x5B || extended_cpu_model() == 0x6B) { + UseAVX = 1; + } else if (extended_cpu_model() == 0x1B || extended_cpu_model() == 0x3B) { + UseAVX = 0; + } + } else if (cpu_family() == 6) { + UseAVX = 0; + } } // UseSSE is set to the smaller of what hardware supports and what @@ -1086,15 +1094,36 @@ void VM_Version::get_processor_features() { } } - // Currently APX support is only enabled for targets supporting AVX512VL feature. - bool apx_supported = os_supports_apx_egprs() && supports_apx_f() && supports_avx512vl(); - if (UseAPX && !apx_supported) { - warning("UseAPX is not supported on this CPU, setting it to false"); + // Currently APX support is only enabled for targets supporting AVX512VL feature. 
+ if (supports_apx_f() && os_supports_apx_egprs() && supports_avx512vl()) { + if (FLAG_IS_DEFAULT(UseAPX)) { + UseAPX = false; // by default UseAPX is false + _features.clear_feature(CPU_APX_F); + } else if (!UseAPX) { + _features.clear_feature(CPU_APX_F); + } + } else if (UseAPX) { + if (!FLAG_IS_DEFAULT(UseAPX)) { + warning("APX is not supported on this CPU, setting it to false)"); + } FLAG_SET_DEFAULT(UseAPX, false); } - if (!UseAPX) { - _features.clear_feature(CPU_APX_F); + CHECK_CPU_FEATURE(supports_clmul, CLMUL); + CHECK_CPU_FEATURE(supports_aes, AES); + CHECK_CPU_FEATURE(supports_fma, FMA); + + if (supports_sha() || (supports_avx2() && supports_bmi2())) { + if (FLAG_IS_DEFAULT(UseSHA)) { + UseSHA = true; + } else if (!UseSHA) { + _features.clear_feature(CPU_SHA); + } + } else if (UseSHA) { + if (!FLAG_IS_DEFAULT(UseSHA)) { + warning("SHA instructions are not available on this CPU"); + } + FLAG_SET_DEFAULT(UseSHA, false); } if (FLAG_IS_DEFAULT(IntelJccErratumMitigation)) { @@ -1144,10 +1173,50 @@ void VM_Version::get_processor_features() { // Use AES instructions if available. if (supports_aes()) { - if (FLAG_IS_DEFAULT(UseAES)) { - FLAG_SET_DEFAULT(UseAES, true); + if (supports_sse3()) { + if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { + FLAG_SET_DEFAULT(UseAESIntrinsics, true); + } + } else if (UseAESIntrinsics) { + // The AES intrinsic stubs require AES instruction support (of course) + // but also require sse3 mode or higher for instructions it use. + if (!FLAG_IS_DEFAULT(UseAESIntrinsics)) { + warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled."); + } + FLAG_SET_DEFAULT(UseAESIntrinsics, false); } - if (!UseAES) { + if (!UseAESIntrinsics) { + if (UseAESCTRIntrinsics) { + if (!FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { + warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. 
Intrinsics will be disabled."); + } + FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); + } + } else { + if (supports_sse4_1()) { + if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { + FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true); + } + } else if (UseAESCTRIntrinsics) { + // The AES-CTR intrinsic stubs require AES instruction support (of course) + // but also require sse4.1 mode or higher for instructions it use. + if (!FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { + warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled."); + } + FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); + } + } + } else { + if (!cpu_supports_aes()) { + if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) { + warning("AES intrinsics are not available on this CPU"); + } + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { + warning("AES-CTR intrinsics are not available on this CPU"); + } + FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); + } else if (!UseAES) { if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) { warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled."); } @@ -1156,66 +1225,7 @@ void VM_Version::get_processor_features() { warning("AES_CTR intrinsics require UseAES flag to be enabled. AES_CTR intrinsics will be disabled."); } FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); - } else { - if (UseSSE > 2) { - if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { - FLAG_SET_DEFAULT(UseAESIntrinsics, true); - } - } else { - // The AES intrinsic stubs require AES instruction support (of course) - // but also require sse3 mode or higher for instructions it use. - if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) { - warning("X86 AES intrinsics require SSE3 instructions or higher. 
Intrinsics will be disabled."); - } - FLAG_SET_DEFAULT(UseAESIntrinsics, false); - } - - // --AES-CTR begins-- - if (!UseAESIntrinsics) { - if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { - warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled."); - } - FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); - } else { - if (supports_sse4_1()) { - if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { - FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true); - } - } else { - // The AES-CTR intrinsic stubs require AES instruction support (of course) - // but also require sse4.1 mode or higher for instructions it use. - if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { - warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled."); - } - FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); - } - } - // --AES-CTR ends-- } - } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) { - if (UseAES && !FLAG_IS_DEFAULT(UseAES)) { - warning("AES instructions are not available on this CPU"); - } - FLAG_SET_DEFAULT(UseAES, false); - if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) { - warning("AES intrinsics are not available on this CPU"); - } - FLAG_SET_DEFAULT(UseAESIntrinsics, false); - if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) { - warning("AES-CTR intrinsics are not available on this CPU"); - } - FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); - } - - // Use CLMUL instructions if available. 
- if (supports_clmul()) { - if (FLAG_IS_DEFAULT(UseCLMUL)) { - UseCLMUL = true; - } - } else if (UseCLMUL) { - if (!FLAG_IS_DEFAULT(UseCLMUL)) - warning("CLMUL instructions not available on this CPU (AVX may also be required)"); - FLAG_SET_DEFAULT(UseCLMUL, false); } if (UseCLMUL && (UseSSE > 2)) { @@ -1256,8 +1266,9 @@ void VM_Version::get_processor_features() { UseGHASHIntrinsics = true; } } else if (UseGHASHIntrinsics) { - if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics)) + if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics)) { warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU"); + } FLAG_SET_DEFAULT(UseGHASHIntrinsics, false); } @@ -1267,26 +1278,27 @@ void VM_Version::get_processor_features() { // based on the VM capabilities whether to use an AVX2 or AVX512-enabled // version. if (UseAVX >= 1) { - if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) { - UseChaCha20Intrinsics = true; - } + if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) { + UseChaCha20Intrinsics = true; + } } else if (UseChaCha20Intrinsics) { - if (!FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) { - warning("ChaCha20 intrinsic requires AVX instructions"); - } - FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false); + if (!FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) { + warning("ChaCha20 intrinsic requires AVX instructions"); + } + FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false); } // Kyber Intrinsics // Currently we only have them for AVX512 if (supports_evex() && supports_avx512bw()) { - if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) { - UseKyberIntrinsics = true; - } - } else - if (UseKyberIntrinsics) { - warning("Intrinsics for ML-KEM are not available on this CPU."); - FLAG_SET_DEFAULT(UseKyberIntrinsics, false); + if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) { + UseKyberIntrinsics = true; + } + } else if (UseKyberIntrinsics) { + if (!FLAG_IS_DEFAULT(UseKyberIntrinsics)) { + warning("Intrinsics for ML-KEM are not available on this CPU."); + } + FLAG_SET_DEFAULT(UseKyberIntrinsics, false); } // Dilithium Intrinsics @@ -1295,8 
+1307,10 @@ void VM_Version::get_processor_features() { UseDilithiumIntrinsics = true; } } else if (UseDilithiumIntrinsics) { + if (!FLAG_IS_DEFAULT(UseDilithiumIntrinsics)) { warning("Intrinsics for ML-DSA are not available on this CPU."); - FLAG_SET_DEFAULT(UseDilithiumIntrinsics, false); + } + FLAG_SET_DEFAULT(UseDilithiumIntrinsics, false); } // Base64 Intrinsics (Check the condition for which the intrinsic will be active) @@ -1305,39 +1319,24 @@ void VM_Version::get_processor_features() { UseBASE64Intrinsics = true; } } else if (UseBASE64Intrinsics) { - if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics)) + if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics)) { warning("Base64 intrinsic requires EVEX instructions on this CPU"); - FLAG_SET_DEFAULT(UseBASE64Intrinsics, false); - } - - if (supports_fma()) { - if (FLAG_IS_DEFAULT(UseFMA)) { - UseFMA = true; } - } else if (UseFMA) { - warning("FMA instructions are not available on this CPU"); - FLAG_SET_DEFAULT(UseFMA, false); + FLAG_SET_DEFAULT(UseBASE64Intrinsics, false); } if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) { UseMD5Intrinsics = true; } - if (supports_sha() || (supports_avx2() && supports_bmi2())) { - if (FLAG_IS_DEFAULT(UseSHA)) { - UseSHA = true; - } - } else if (UseSHA) { - warning("SHA instructions are not available on this CPU"); - FLAG_SET_DEFAULT(UseSHA, false); - } - if (supports_sha() && supports_sse4_1() && UseSHA) { if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { FLAG_SET_DEFAULT(UseSHA1Intrinsics, true); } } else if (UseSHA1Intrinsics) { - warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU."); + if (!FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { + warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU."); + } FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); } @@ -1346,7 +1345,9 @@ void VM_Version::get_processor_features() { FLAG_SET_DEFAULT(UseSHA256Intrinsics, true); } } else if (UseSHA256Intrinsics) { - warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on 
this CPU."); + if (!FLAG_IS_DEFAULT(UseSHA256Intrinsics)) { + warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU."); + } FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); } @@ -1355,7 +1356,9 @@ void VM_Version::get_processor_features() { FLAG_SET_DEFAULT(UseSHA512Intrinsics, true); } } else if (UseSHA512Intrinsics) { - warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU."); + if (!FLAG_IS_DEFAULT(UseSHA512Intrinsics)) { + warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU."); + } FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); } @@ -1364,14 +1367,12 @@ void VM_Version::get_processor_features() { FLAG_SET_DEFAULT(UseSHA3Intrinsics, true); } } else if (UseSHA3Intrinsics) { - warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU."); + if (!FLAG_IS_DEFAULT(UseSHA3Intrinsics)) { + warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU."); + } FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); } - if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics)) { - FLAG_SET_DEFAULT(UseSHA, false); - } - #if COMPILER2_OR_JVMCI int max_vector_size = 0; if (UseAVX == 0 || !os_supports_avx_vectors()) { @@ -1427,7 +1428,9 @@ void VM_Version::get_processor_features() { FLAG_SET_DEFAULT(UsePoly1305Intrinsics, true); } } else if (UsePoly1305Intrinsics) { - warning("Intrinsics for Poly1305 crypto hash functions not available on this CPU."); + if (!FLAG_IS_DEFAULT(UsePoly1305Intrinsics)) { + warning("Intrinsics for Poly1305 crypto hash functions not available on this CPU."); + } FLAG_SET_DEFAULT(UsePoly1305Intrinsics, false); } @@ -1436,7 +1439,9 @@ void VM_Version::get_processor_features() { FLAG_SET_DEFAULT(UseIntPolyIntrinsics, true); } } else if (UseIntPolyIntrinsics) { - warning("Intrinsics for Polynomial crypto functions 
not available on this CPU."); + if (!FLAG_IS_DEFAULT(UseIntPolyIntrinsics)) { + warning("Intrinsics for Polynomial crypto functions not available on this CPU."); + } FLAG_SET_DEFAULT(UseIntPolyIntrinsics, false); } @@ -1500,9 +1505,6 @@ void VM_Version::get_processor_features() { MaxLoopPad = 11; } #endif // COMPILER2 - if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { - UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus - } if (supports_sse4_2()) { // new ZX cpus if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus @@ -1520,10 +1522,6 @@ void VM_Version::get_processor_features() { // Use it on new AMD cpus starting from Opteron. UseAddressNop = true; } - if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) { - // Use it on new AMD cpus starting from Opteron. - UseNewLongLShift = true; - } if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) { if (supports_sse4a()) { UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron @@ -1563,10 +1561,6 @@ void VM_Version::get_processor_features() { if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) { FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3); } - // On family 15h processors use XMM and UnalignedLoadStores for Array Copy - if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { - FLAG_SET_DEFAULT(UseXMMForArrayCopy, true); - } if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { FLAG_SET_DEFAULT(UseUnalignedLoadStores, true); } @@ -1583,9 +1577,6 @@ void VM_Version::get_processor_features() { if (cpu_family() >= 0x17) { // On family >=17h processors use XMM and UnalignedLoadStores // for Array Copy - if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { - FLAG_SET_DEFAULT(UseXMMForArrayCopy, true); - } if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { FLAG_SET_DEFAULT(UseUnalignedLoadStores, true); } @@ -1632,10 +1623,7 @@ void VM_Version::get_processor_features() { } #endif // COMPILER2 - if 
(FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { - UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus - } - if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus + if (is_intel_modern_cpu()) { // Newest Intel cpus if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus } @@ -1703,8 +1691,8 @@ void VM_Version::get_processor_features() { if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) { FLAG_SET_DEFAULT(UseSSE42Intrinsics, true); } - } else { - if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) { + } else if (UseSSE42Intrinsics) { + if (!FLAG_IS_DEFAULT(UseSSE42Intrinsics)) { warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled."); } FLAG_SET_DEFAULT(UseSSE42Intrinsics, false); @@ -1714,15 +1702,17 @@ void VM_Version::get_processor_features() { UseVectorizedMismatchIntrinsic = true; } } else if (UseVectorizedMismatchIntrinsic) { - if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) + if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) { warning("vectorizedMismatch intrinsics are not available on this CPU"); + } FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false); } if (UseAVX >= 2) { FLAG_SET_DEFAULT(UseVectorizedHashCodeIntrinsic, true); } else if (UseVectorizedHashCodeIntrinsic) { - if (!FLAG_IS_DEFAULT(UseVectorizedHashCodeIntrinsic)) + if (!FLAG_IS_DEFAULT(UseVectorizedHashCodeIntrinsic)) { warning("vectorizedHashCode intrinsics are not available on this CPU"); + } FLAG_SET_DEFAULT(UseVectorizedHashCodeIntrinsic, false); } @@ -1732,7 +1722,9 @@ void VM_Version::get_processor_features() { UseCountLeadingZerosInstruction = true; } } else if (UseCountLeadingZerosInstruction) { - warning("lzcnt instruction is not available on this CPU"); + if (!FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) { + warning("lzcnt instruction is not available on this CPU"); + } FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false); } @@ 
-1748,7 +1740,9 @@ void VM_Version::get_processor_features() { } } } else if (UseCountTrailingZerosInstruction) { - warning("tzcnt instruction is not available on this CPU"); + if (!FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { + warning("tzcnt instruction is not available on this CPU"); + } FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false); } @@ -1759,7 +1753,9 @@ void VM_Version::get_processor_features() { UseBMI1Instructions = true; } } else if (UseBMI1Instructions) { - warning("BMI1 instructions are not available on this CPU (AVX is also required)"); + if (!FLAG_IS_DEFAULT(UseBMI1Instructions)) { + warning("BMI1 instructions are not available on this CPU (AVX is also required)"); + } FLAG_SET_DEFAULT(UseBMI1Instructions, false); } @@ -1768,7 +1764,9 @@ void VM_Version::get_processor_features() { UseBMI2Instructions = true; } } else if (UseBMI2Instructions) { - warning("BMI2 instructions are not available on this CPU (AVX is also required)"); + if (!FLAG_IS_DEFAULT(UseBMI2Instructions)) { + warning("BMI2 instructions are not available on this CPU (AVX is also required)"); + } FLAG_SET_DEFAULT(UseBMI2Instructions, false); } @@ -1778,7 +1776,9 @@ void VM_Version::get_processor_features() { UsePopCountInstruction = true; } } else if (UsePopCountInstruction) { - warning("POPCNT instruction is not available on this CPU"); + if (!FLAG_IS_DEFAULT(UsePopCountInstruction)) { + warning("POPCNT instruction is not available on this CPU"); + } FLAG_SET_DEFAULT(UsePopCountInstruction, false); } @@ -1788,7 +1788,9 @@ void VM_Version::get_processor_features() { UseFastStosb = true; } } else if (UseFastStosb) { - warning("fast-string operations are not available on this CPU"); + if (!FLAG_IS_DEFAULT(UseFastStosb)) { + warning("fast-string operations are not available on this CPU"); + } FLAG_SET_DEFAULT(UseFastStosb, false); } @@ -1814,7 +1816,9 @@ void VM_Version::get_processor_features() { UseXMMForObjInit = true; } } else if (UseXMMForObjInit) { - 
warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off."); + if (!FLAG_IS_DEFAULT(UseXMMForObjInit)) { + warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off."); + } FLAG_SET_DEFAULT(UseXMMForObjInit, false); } @@ -1855,7 +1859,7 @@ void VM_Version::get_processor_features() { if (is_intel() && is_intel_server_family() && supports_sse3()) { if (FLAG_IS_DEFAULT(AllocatePrefetchLines) && - supports_sse4_2() && supports_ht()) { // Nehalem based cpus + is_intel_modern_cpu()) { // Nehalem based cpus FLAG_SET_DEFAULT(AllocatePrefetchLines, 4); } #ifdef COMPILER2 @@ -1894,7 +1898,7 @@ void VM_Version::get_processor_features() { if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && (cache_line_size > ContendedPaddingWidth)) - ContendedPaddingWidth = cache_line_size; + ContendedPaddingWidth = cache_line_size; // This machine allows unaligned memory accesses if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) { @@ -1959,6 +1963,18 @@ void VM_Version::get_processor_features() { if (FLAG_IS_DEFAULT(UseCopySignIntrinsic)) { FLAG_SET_DEFAULT(UseCopySignIntrinsic, true); } + // CopyAVX3Threshold is the threshold at which 64-byte instructions are used + // for implementing the array copy and clear operations. + // The Intel platforms that supports the serialize instruction + // have improved implementation of 64-byte load/stores and so the default + // threshold is set to 0 for these platforms. 
+ if (FLAG_IS_DEFAULT(CopyAVX3Threshold)) { + if (is_intel() && is_intel_server_family() && supports_serialize()) { + FLAG_SET_DEFAULT(CopyAVX3Threshold, 0); + } else { + FLAG_SET_DEFAULT(CopyAVX3Threshold, AVX3Threshold); + } + } } void VM_Version::print_platform_virtualization_info(outputStream* st) { @@ -2114,17 +2130,6 @@ bool VM_Version::is_intel_darkmont() { return is_intel() && is_intel_server_family() && (_model == 0xCC || _model == 0xDD); } -// avx3_threshold() sets the threshold at which 64-byte instructions are used -// for implementing the array copy and clear operations. -// The Intel platforms that supports the serialize instruction -// has improved implementation of 64-byte load/stores and so the default -// threshold is set to 0 for these platforms. -int VM_Version::avx3_threshold() { - return (is_intel_server_family() && - supports_serialize() && - FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold; -} - void VM_Version::clear_apx_test_state() { clear_apx_test_state_stub(); } @@ -2623,6 +2628,23 @@ const char* VM_Version::cpu_family_description(void) { return _family_id_intel[cpu_family_id]; } } + if (is_zx()) { + int cpu_model_id = extended_cpu_model(); + if (cpu_family_id == 7) { + switch (cpu_model_id) { + case 0x1B: + return "wudaokou"; + case 0x3B: + return "lujiazui"; + case 0x5B: + return "yongfeng"; + case 0x6B: + return "shijidadao"; + } + } else if (cpu_family_id == 6) { + return "zhangjiang"; + } + } if (is_hygon()) { return "Dhyana"; } @@ -2642,6 +2664,9 @@ int VM_Version::cpu_type_description(char* const buf, size_t buf_len) { } else if (is_amd()) { cpu_type = "AMD"; x64 = cpu_is_em64t() ? " AMD64" : ""; + } else if (is_zx()) { + cpu_type = "Zhaoxin"; + x64 = cpu_is_em64t() ? " x86_64" : ""; } else if (is_hygon()) { cpu_type = "Hygon"; x64 = cpu_is_em64t() ? 
" AMD64" : ""; @@ -3259,9 +3284,15 @@ int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) { } else { return 128; // Athlon } + } else if (is_zx()) { + if (supports_sse2()) { + return 256; + } else { + return 128; + } } else { // Intel if (supports_sse3() && is_intel_server_family()) { - if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus + if (is_intel_modern_cpu()) { // Nehalem based cpus return 192; } else if (use_watermark_prefetch) { // watermark prefetching on Core return 384; diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp index e0a895737b7..f721635a02e 100644 --- a/src/hotspot/cpu/x86/vm_version_x86.hpp +++ b/src/hotspot/cpu/x86/vm_version_x86.hpp @@ -535,6 +535,10 @@ protected: static const char* _features_names[]; + static void clear_feature(Feature_Flag feature) { + _features.clear_feature(feature); + } + static void clear_cpu_features() { _features = VM_Features(); _cpu_features = VM_Features(); @@ -828,7 +832,7 @@ public: static uint32_t cpu_stepping() { return _cpuid_info.cpu_stepping(); } static int cpu_family() { return _cpu;} static bool is_P6() { return cpu_family() >= 6; } - static bool is_intel_server_family() { return cpu_family() == 6 || cpu_family() == 19; } + static bool is_intel_server_family() { return cpu_family() == 6 || cpu_family() == 18 || cpu_family() == 19; } static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA' static bool is_hygon() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH' static bool is_amd_family() { return is_amd() || is_hygon(); } @@ -930,6 +934,7 @@ public: // Feature identification not affected by VM flags // static bool cpu_supports_evex() { return _cpu_features.supports_feature(CPU_AVX512F); } + static bool cpu_supports_aes() { return _cpu_features.supports_feature(CPU_AES); } static bool supports_avx512_simd_sort() { if (supports_avx512dq()) 
{ @@ -958,7 +963,11 @@ public: static bool is_intel_darkmont(); - static int avx3_threshold(); + static bool is_intel_modern_cpu() { + precond(is_intel()); // should be called only for intel CPU + // Efficient cores in hybrid CPU may not support hyper-threads. + return (supports_avx() || (supports_sse4_2() && (supports_ht() || supports_hybrid()))); + } static bool is_intel_tsc_synched_at_init(); diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad index 0ffa4c2031c..eaa88d900c7 100644 --- a/src/hotspot/cpu/x86/x86.ad +++ b/src/hotspot/cpu/x86/x86.ad @@ -1708,84 +1708,99 @@ static void emit_cmpfp3(MacroAssembler* masm, Register dst) { __ bind(done); } -// Math.min() # Math.max() -// -------------------------- -// ucomis[s/d] # -// ja -> b # a -// jp -> NaN # NaN -// jb -> a # b -// je # -// |-jz -> a | b # a & b -// | -> a # +enum FP_PREC { + fp_prec_hlf, + fp_prec_flt, + fp_prec_dbl +}; + +static inline void emit_fp_ucom(MacroAssembler* masm, enum FP_PREC pt, + XMMRegister p, XMMRegister q) { + if (pt == fp_prec_hlf) { + __ evucomish(p, q); + } else if (pt == fp_prec_flt) { + __ ucomiss(p, q); + } else { + __ ucomisd(p, q); + } +} + +static inline void movfp(MacroAssembler* masm, enum FP_PREC pt, + XMMRegister dst, XMMRegister src, Register scratch) { + if (pt == fp_prec_hlf) { + __ movhlf(dst, src, scratch); + } else if (pt == fp_prec_flt) { + __ movflt(dst, src); + } else { + __ movdbl(dst, src); + } +} + +// Math.min() # Math.max() +// ----------------------------- +// (v)ucomis[h/s/d] # +// ja -> b # a +// jp -> NaN # NaN +// jb -> a # b +// je # +// |-jz -> a | b # a & b +// | -> a # static void emit_fp_min_max(MacroAssembler* masm, XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister xmmt, Register rt, - bool min, bool single) { + bool min, enum FP_PREC pt) { Label nan, zero, below, above, done; - if (single) - __ ucomiss(a, b); - else - __ ucomisd(a, b); + emit_fp_ucom(masm, pt, a, b); - if (dst->encoding() != (min ? 
b : a)->encoding()) + if (dst->encoding() != (min ? b : a)->encoding()) { __ jccb(Assembler::above, above); // CF=0 & ZF=0 - else + } else { __ jccb(Assembler::above, done); + } __ jccb(Assembler::parity, nan); // PF=1 __ jccb(Assembler::below, below); // CF=1 // equal __ vpxor(xmmt, xmmt, xmmt, Assembler::AVX_128bit); - if (single) { - __ ucomiss(a, xmmt); - __ jccb(Assembler::equal, zero); + emit_fp_ucom(masm, pt, a, xmmt); - __ movflt(dst, a); - __ jmp(done); - } - else { - __ ucomisd(a, xmmt); - __ jccb(Assembler::equal, zero); + __ jccb(Assembler::equal, zero); + movfp(masm, pt, dst, a, rt); - __ movdbl(dst, a); - __ jmp(done); - } + __ jmp(done); __ bind(zero); - if (min) + if (min) { __ vpor(dst, a, b, Assembler::AVX_128bit); - else + } else { __ vpand(dst, a, b, Assembler::AVX_128bit); + } __ jmp(done); __ bind(above); - if (single) - __ movflt(dst, min ? b : a); - else - __ movdbl(dst, min ? b : a); + movfp(masm, pt, dst, min ? b : a, rt); __ jmp(done); __ bind(nan); - if (single) { + if (pt == fp_prec_hlf) { + __ movl(rt, 0x00007e00); // Float16.NaN + __ evmovw(dst, rt); + } else if (pt == fp_prec_flt) { __ movl(rt, 0x7fc00000); // Float.NaN __ movdl(dst, rt); - } - else { + } else { __ mov64(rt, 0x7ff8000000000000L); // Double.NaN __ movdq(dst, rt); } __ jmp(done); __ bind(below); - if (single) - __ movflt(dst, min ? a : b); - else - __ movdbl(dst, min ? a : b); + movfp(masm, pt, dst, min ? 
a : b, rt); __ bind(done); } @@ -2605,13 +2620,8 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const #ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { - if (UseCompressedClassPointers) { - st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); - st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check"); - } else { - st->print_cr("movq rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); - st->print_cr("\tcmpq rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check"); - } + st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); + st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check"); st->print_cr("\tjne SharedRuntime::_ic_miss_stub"); } #endif @@ -2726,11 +2736,8 @@ bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { return (-128 <= offset && offset <= 127); } +#ifdef ASSERT // Return whether or not this register is ever used as an argument. -// This function is used on startup to build the trampoline stubs in -// generateOptoStub. Registers not mentioned will be killed by the VM -// call in the trampoline, and arguments in those registers not be -// available to the callee. bool Matcher::can_be_java_arg(int reg) { return @@ -2750,11 +2757,7 @@ bool Matcher::can_be_java_arg(int reg) reg == XMM6_num || reg == XMM6b_num || reg == XMM7_num || reg == XMM7b_num; } - -bool Matcher::is_spillable_arg(int reg) -{ - return can_be_java_arg(reg); -} +#endif uint Matcher::int_pressure_limit() { @@ -2770,13 +2773,6 @@ uint Matcher::float_pressure_limit() return (FLOATPRESSURE == -1) ? 
default_float_pressure_threshold : FLOATPRESSURE; } -bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { - // In 64 bit mode a code which use multiply when - // devisor is constant is faster than hardware - // DIV instruction (it uses MulHiL). - return false; -} - // Register for DIVI projection of divmodI const RegMask& Matcher::divI_proj_mask() { return INT_RAX_REG_mask(); @@ -3341,6 +3337,18 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { return false; } break; + case Op_UMinReductionV: + case Op_UMaxReductionV: + if (UseAVX == 0) { + return false; + } + if (bt == T_LONG && !VM_Version::supports_avx512vl()) { + return false; + } + if (UseAVX > 2 && size_in_bits == 512 && !VM_Version::supports_avx512vl()) { + return false; + } + break; case Op_MaxV: case Op_MinV: if (UseSSE < 4 && is_integral_type(bt)) { @@ -4679,11 +4687,6 @@ frame // Compiled code's Frame Pointer frame_pointer(RSP); - // Interpreter stores its frame pointer in a register which is - // stored to the stack by I2CAdaptors. - // I2CAdaptors convert from interpreted java to compiled java. 
- interpreter_frame_pointer(RBP); - // Stack alignment requirement stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) @@ -7357,146 +7360,140 @@ instruct loadAOTRCAddress(rRegP dst, immAOTRuntimeConstantsAddress con) ins_pipe(ialu_reg_fat); %} +// min = java.lang.Math.min(float a, float b) // max = java.lang.Math.max(float a, float b) -instruct maxF_reg_avx10_2(regF dst, regF a, regF b) %{ - predicate(VM_Version::supports_avx10_2()); +instruct minmaxF_reg_avx10_2(regF dst, regF a, regF b) +%{ + predicate(VM_Version::supports_avx10_2() && !VLoopReductions::is_reduction(n)); match(Set dst (MaxF a b)); - format %{ "maxF $dst, $a, $b" %} - ins_encode %{ - __ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN); - %} - ins_pipe( pipe_slow ); -%} - -// max = java.lang.Math.max(float a, float b) -instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ - predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n)); - match(Set dst (MaxF a b)); - effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); - format %{ "maxF $dst, $a, $b \t! 
using $tmp, $atmp and $btmp as TEMP" %} - ins_encode %{ - __ vminmax_fp(Op_MaxV, T_FLOAT, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit); - %} - ins_pipe( pipe_slow ); -%} - -instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRegI rtmp, rFlagsReg cr) %{ - predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n)); - match(Set dst (MaxF a b)); - effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr); - - format %{ "maxF_reduction $dst, $a, $b \t!using $xtmp and $rtmp as TEMP" %} - ins_encode %{ - emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register, - false /*min*/, true /*single*/); - %} - ins_pipe( pipe_slow ); -%} - -// max = java.lang.Math.max(double a, double b) -instruct maxD_reg_avx10_2(regD dst, regD a, regD b) %{ - predicate(VM_Version::supports_avx10_2()); - match(Set dst (MaxD a b)); - format %{ "maxD $dst, $a, $b" %} - ins_encode %{ - __ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN); - %} - ins_pipe( pipe_slow ); -%} - -// max = java.lang.Math.max(double a, double b) -instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{ - predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n)); - match(Set dst (MaxD a b)); - effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp); - format %{ "maxD $dst, $a, $b \t! 
using $tmp, $atmp and $btmp as TEMP" %} - ins_encode %{ - __ vminmax_fp(Op_MaxV, T_DOUBLE, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit); - %} - ins_pipe( pipe_slow ); -%} - -instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRegL rtmp, rFlagsReg cr) %{ - predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n)); - match(Set dst (MaxD a b)); - effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr); - - format %{ "maxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %} - ins_encode %{ - emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register, - false /*min*/, false /*single*/); - %} - ins_pipe( pipe_slow ); -%} - -// max = java.lang.Math.min(float a, float b) -instruct minF_reg_avx10_2(regF dst, regF a, regF b) %{ - predicate(VM_Version::supports_avx10_2()); match(Set dst (MinF a b)); - format %{ "minF $dst, $a, $b" %} + + format %{ "minmaxF $dst, $a, $b" %} ins_encode %{ - __ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN); + int opcode = this->ideal_Opcode(); + __ sminmax_fp_avx10_2(opcode, T_FLOAT, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister); + %} + ins_pipe( pipe_slow ); +%} + +instruct minmaxF_reduction_reg_avx10_2(regF dst, regF a, regF b, regF xtmp, rRegI rtmp, rFlagsReg cr) +%{ + predicate(VM_Version::supports_avx10_2() && VLoopReductions::is_reduction(n)); + match(Set dst (MaxF a b)); + match(Set dst (MinF a b)); + effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr); + + format %{ "minmaxF_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %} + ins_encode %{ + int opcode = this->ideal_Opcode(); + bool min = (opcode == Op_MinF) ? 
true : false; + emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register, + min, fp_prec_flt /*pt*/); %} ins_pipe( pipe_slow ); %} // min = java.lang.Math.min(float a, float b) -instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ +// max = java.lang.Math.max(float a, float b) +instruct minmaxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) +%{ predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n)); + match(Set dst (MaxF a b)); match(Set dst (MinF a b)); effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); - format %{ "minF $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %} + + format %{ "minmaxF $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %} ins_encode %{ - __ vminmax_fp(Op_MinV, T_FLOAT, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit); + int opcode = this->ideal_Opcode(); + int param_opcode = (opcode == Op_MinF) ? Op_MinV : Op_MaxV; + __ vminmax_fp(param_opcode, T_FLOAT, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, + $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit); %} ins_pipe( pipe_slow ); %} -instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRegI rtmp, rFlagsReg cr) %{ +instruct minmaxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRegI rtmp, rFlagsReg cr) +%{ predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n)); + match(Set dst (MaxF a b)); match(Set dst (MinF a b)); effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr); - format %{ "minF_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %} + format %{ "minmaxF_reduction $dst, $a, $b \t!using $xtmp and $rtmp as TEMP" %} ins_encode %{ + int opcode = this->ideal_Opcode(); + bool min = (opcode == Op_MinF) ? 
true : false; emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register, - true /*min*/, true /*single*/); - %} - ins_pipe( pipe_slow ); -%} - -// max = java.lang.Math.min(double a, double b) -instruct minD_reg_avx10_2(regD dst, regD a, regD b) %{ - predicate(VM_Version::supports_avx10_2()); - match(Set dst (MinD a b)); - format %{ "minD $dst, $a, $b" %} - ins_encode %{ - __ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN); + min, fp_prec_flt /*pt*/); %} ins_pipe( pipe_slow ); %} // min = java.lang.Math.min(double a, double b) -instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{ - predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n)); +// max = java.lang.Math.max(double a, double b) +instruct minmaxD_reg_avx10_2(regD dst, regD a, regD b) +%{ + predicate(VM_Version::supports_avx10_2() && !VLoopReductions::is_reduction(n)); + match(Set dst (MaxD a b)); match(Set dst (MinD a b)); - effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); - format %{ "minD $dst, $a, $b \t! 
using $tmp, $atmp and $btmp as TEMP" %} + + format %{ "minmaxD $dst, $a, $b" %} ins_encode %{ - __ vminmax_fp(Op_MinV, T_DOUBLE, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit); + int opcode = this->ideal_Opcode(); + __ sminmax_fp_avx10_2(opcode, T_DOUBLE, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister); %} ins_pipe( pipe_slow ); %} -instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRegL rtmp, rFlagsReg cr) %{ - predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n)); +instruct minmaxD_reduction_reg_avx10_2(regD dst, regD a, regD b, regD xtmp, rRegI rtmp, rFlagsReg cr) +%{ + predicate(VM_Version::supports_avx10_2() && VLoopReductions::is_reduction(n)); + match(Set dst (MaxD a b)); match(Set dst (MinD a b)); effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr); - format %{ "maxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %} + format %{ "minmaxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %} ins_encode %{ + int opcode = this->ideal_Opcode(); + bool min = (opcode == Op_MinD) ? true : false; emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register, - true /*min*/, false /*single*/); + min, fp_prec_dbl /*pt*/); + %} + ins_pipe( pipe_slow ); +%} + +// min = java.lang.Math.min(double a, double b) +// max = java.lang.Math.max(double a, double b) +instruct minmaxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) +%{ + predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n)); + match(Set dst (MaxD a b)); + match(Set dst (MinD a b)); + effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp); + + format %{ "minmaxD $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %} + ins_encode %{ + int opcode = this->ideal_Opcode(); + int param_opcode = (opcode == Op_MinD) ? 
Op_MinV : Op_MaxV; + __ vminmax_fp(param_opcode, T_DOUBLE, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, + $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit); + %} + ins_pipe( pipe_slow ); +%} + +instruct minmaxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRegL rtmp, rFlagsReg cr) +%{ + predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n)); + match(Set dst (MaxD a b)); + match(Set dst (MinD a b)); + effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr); + + format %{ "minmaxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %} + ins_encode %{ + int opcode = this->ideal_Opcode(); + bool min = (opcode == Op_MinD) ? true : false; + emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register, + min, fp_prec_dbl /*pt*/); %} ins_pipe( pipe_slow ); %} @@ -8852,6 +8849,21 @@ instruct membar_release_lock() ins_pipe(empty); %} +instruct membar_storeload(rFlagsReg cr) %{ + match(MemBarStoreLoad); + effect(KILL cr); + ins_cost(400); + + format %{ + $$template + $$emit$$"lock addl [rsp + #0], 0\t! membar_storeload" + %} + ins_encode %{ + __ membar(Assembler::StoreLoad); + %} + ins_pipe(pipe_slow); +%} + instruct membar_volatile(rFlagsReg cr) %{ match(MemBarVolatile); effect(KILL cr); @@ -8879,6 +8891,21 @@ instruct unnecessary_membar_volatile() ins_pipe(empty); %} +instruct membar_full(rFlagsReg cr) %{ + match(MemBarFull); + effect(KILL cr); + ins_cost(400); + + format %{ + $$template + $$emit$$"lock addl [rsp + #0], 0\t! 
membar_full" + %} + ins_encode %{ + __ membar(Assembler::StoreLoad); + %} + ins_pipe(pipe_slow); +%} + instruct membar_storestore() %{ match(MemBarStoreStore); match(StoreStoreFence); @@ -14376,9 +14403,9 @@ instruct cmpF_cc_regCFE(rFlagsRegUCFE cr, regF src1, regF src2) %{ match(Set cr (CmpF src1 src2)); ins_cost(100); - format %{ "vucomxss $src1, $src2" %} + format %{ "evucomxss $src1, $src2" %} ins_encode %{ - __ vucomxss($src1$$XMMRegister, $src2$$XMMRegister); + __ evucomxss($src1$$XMMRegister, $src2$$XMMRegister); %} ins_pipe(pipe_slow); %} @@ -14398,9 +14425,9 @@ instruct cmpF_cc_memCFE(rFlagsRegUCFE cr, regF src1, memory src2) %{ match(Set cr (CmpF src1 (LoadF src2))); ins_cost(100); - format %{ "vucomxss $src1, $src2" %} + format %{ "evucomxss $src1, $src2" %} ins_encode %{ - __ vucomxss($src1$$XMMRegister, $src2$$Address); + __ evucomxss($src1$$XMMRegister, $src2$$Address); %} ins_pipe(pipe_slow); %} @@ -14420,9 +14447,9 @@ instruct cmpF_cc_immCFE(rFlagsRegUCFE cr, regF src, immF con) %{ match(Set cr (CmpF src con)); ins_cost(100); - format %{ "vucomxss $src, [$constantaddress]\t# load from constant table: float=$con" %} + format %{ "evucomxss $src, [$constantaddress]\t# load from constant table: float=$con" %} ins_encode %{ - __ vucomxss($src$$XMMRegister, $constantaddress($con)); + __ evucomxss($src$$XMMRegister, $constantaddress($con)); %} ins_pipe(pipe_slow); %} @@ -14461,9 +14488,9 @@ instruct cmpD_cc_regCFE(rFlagsRegUCFE cr, regD src1, regD src2) %{ match(Set cr (CmpD src1 src2)); ins_cost(100); - format %{ "vucomxsd $src1, $src2 test" %} + format %{ "evucomxsd $src1, $src2 test" %} ins_encode %{ - __ vucomxsd($src1$$XMMRegister, $src2$$XMMRegister); + __ evucomxsd($src1$$XMMRegister, $src2$$XMMRegister); %} ins_pipe(pipe_slow); %} @@ -14483,9 +14510,9 @@ instruct cmpD_cc_memCFE(rFlagsRegUCFE cr, regD src1, memory src2) %{ match(Set cr (CmpD src1 (LoadD src2))); ins_cost(100); - format %{ "vucomxsd $src1, $src2" %} + format %{ "evucomxsd $src1, 
$src2" %} ins_encode %{ - __ vucomxsd($src1$$XMMRegister, $src2$$Address); + __ evucomxsd($src1$$XMMRegister, $src2$$Address); %} ins_pipe(pipe_slow); %} @@ -14504,9 +14531,9 @@ instruct cmpD_cc_immCFE(rFlagsRegUCFE cr, regD src, immD con) %{ match(Set cr (CmpD src con)); ins_cost(100); - format %{ "vucomxsd $src, [$constantaddress]\t# load from constant table: double=$con" %} + format %{ "evucomxsd $src, [$constantaddress]\t# load from constant table: double=$con" %} ins_encode %{ - __ vucomxsd($src$$XMMRegister, $constantaddress($con)); + __ evucomxsd($src$$XMMRegister, $constantaddress($con)); %} ins_pipe(pipe_slow); %} @@ -18814,7 +18841,7 @@ instruct ReplHF_reg(vec dst, regF src, rRegI rtmp) %{ format %{ "replicateHF $dst, $src \t! using $rtmp as TEMP" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); - __ vmovw($rtmp$$Register, $src$$XMMRegister); + __ evmovw($rtmp$$Register, $src$$XMMRegister); __ evpbroadcastw($dst$$XMMRegister, $rtmp$$Register, vlen_enc); %} ins_pipe( pipe_slow ); @@ -19371,6 +19398,8 @@ instruct reductionI(rRegI dst, rRegI src1, legVec src2, legVec vtmp1, legVec vtm match(Set dst (XorReductionV src1 src2)); match(Set dst (MinReductionV src1 src2)); match(Set dst (MaxReductionV src1 src2)); + match(Set dst (UMinReductionV src1 src2)); + match(Set dst (UMaxReductionV src1 src2)); effect(TEMP vtmp1, TEMP vtmp2); format %{ "vector_reduction_int $dst,$src1,$src2 ; using $vtmp1, $vtmp2 as TEMP" %} ins_encode %{ @@ -19392,6 +19421,8 @@ instruct reductionL(rRegL dst, rRegL src1, legVec src2, legVec vtmp1, legVec vtm match(Set dst (XorReductionV src1 src2)); match(Set dst (MinReductionV src1 src2)); match(Set dst (MaxReductionV src1 src2)); + match(Set dst (UMinReductionV src1 src2)); + match(Set dst (UMaxReductionV src1 src2)); effect(TEMP vtmp1, TEMP vtmp2); format %{ "vector_reduction_long $dst,$src1,$src2 ; using $vtmp1, $vtmp2 as TEMP" %} ins_encode %{ @@ -19411,6 +19442,8 @@ instruct reductionL_avx512dq(rRegL dst, rRegL src1, vec 
src2, vec vtmp1, vec vtm match(Set dst (XorReductionV src1 src2)); match(Set dst (MinReductionV src1 src2)); match(Set dst (MaxReductionV src1 src2)); + match(Set dst (UMinReductionV src1 src2)); + match(Set dst (UMaxReductionV src1 src2)); effect(TEMP vtmp1, TEMP vtmp2); format %{ "vector_reduction_long $dst,$src1,$src2 ; using $vtmp1, $vtmp2 as TEMP" %} ins_encode %{ @@ -19639,6 +19672,8 @@ instruct reductionB(rRegI dst, rRegI src1, legVec src2, legVec vtmp1, legVec vtm match(Set dst (XorReductionV src1 src2)); match(Set dst (MinReductionV src1 src2)); match(Set dst (MaxReductionV src1 src2)); + match(Set dst (UMinReductionV src1 src2)); + match(Set dst (UMaxReductionV src1 src2)); effect(TEMP vtmp1, TEMP vtmp2); format %{ "vector_reduction_byte $dst,$src1,$src2 ; using $vtmp1, $vtmp2 as TEMP" %} ins_encode %{ @@ -19657,6 +19692,8 @@ instruct reductionB_avx512bw(rRegI dst, rRegI src1, vec src2, vec vtmp1, vec vtm match(Set dst (XorReductionV src1 src2)); match(Set dst (MinReductionV src1 src2)); match(Set dst (MaxReductionV src1 src2)); + match(Set dst (UMinReductionV src1 src2)); + match(Set dst (UMaxReductionV src1 src2)); effect(TEMP vtmp1, TEMP vtmp2); format %{ "vector_reduction_byte $dst,$src1,$src2 ; using $vtmp1, $vtmp2 as TEMP" %} ins_encode %{ @@ -19678,6 +19715,8 @@ instruct reductionS(rRegI dst, rRegI src1, legVec src2, legVec vtmp1, legVec vtm match(Set dst (XorReductionV src1 src2)); match(Set dst (MinReductionV src1 src2)); match(Set dst (MaxReductionV src1 src2)); + match(Set dst (UMinReductionV src1 src2)); + match(Set dst (UMaxReductionV src1 src2)); effect(TEMP vtmp1, TEMP vtmp2); format %{ "vector_reduction_short $dst,$src1,$src2 ; using $vtmp1, $vtmp2 as TEMP" %} ins_encode %{ @@ -20917,7 +20956,7 @@ instruct minmaxFP_reg_avx10_2(vec dst, vec a, vec b) %{ int vlen_enc = vector_length_encoding(this); int opcode = this->ideal_Opcode(); BasicType elem_bt = Matcher::vector_element_basic_type(this); - __ vminmax_fp(opcode, elem_bt, 
$dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister, vlen_enc); + __ vminmax_fp_avx10_2(opcode, elem_bt, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister, vlen_enc); %} ins_pipe( pipe_slow ); %} @@ -23933,8 +23972,12 @@ instruct vmask_gen_imm(kReg dst, immL len, rRegL temp) %{ format %{ "vector_mask_gen $len \t! vector mask generator" %} effect(TEMP temp); ins_encode %{ - __ mov64($temp$$Register, (0xFFFFFFFFFFFFFFFFUL >> (64 -$len$$constant))); - __ kmovql($dst$$KRegister, $temp$$Register); + if ($len$$constant > 0) { + __ mov64($temp$$Register, right_n_bits($len$$constant)); + __ kmovql($dst$$KRegister, $temp$$Register); + } else { + __ kxorql($dst$$KRegister, $dst$$KRegister, $dst$$KRegister); + } %} ins_pipe( pipe_slow ); %} @@ -25261,9 +25304,9 @@ instruct vector_selectfrom_twovectors_reg_evex(vec index, vec src1, vec src2) instruct reinterpretS2HF(regF dst, rRegI src) %{ match(Set dst (ReinterpretS2HF src)); - format %{ "vmovw $dst, $src" %} + format %{ "evmovw $dst, $src" %} ins_encode %{ - __ vmovw($dst$$XMMRegister, $src$$Register); + __ evmovw($dst$$XMMRegister, $src$$Register); %} ins_pipe(pipe_slow); %} @@ -25271,9 +25314,9 @@ instruct reinterpretS2HF(regF dst, rRegI src) instruct reinterpretHF2S(rRegI dst, regF src) %{ match(Set dst (ReinterpretHF2S src)); - format %{ "vmovw $dst, $src" %} + format %{ "evmovw $dst, $src" %} ins_encode %{ - __ vmovw($dst$$Register, $src$$XMMRegister); + __ evmovw($dst$$Register, $src$$XMMRegister); %} ins_pipe(pipe_slow); %} @@ -25327,10 +25370,11 @@ instruct scalar_minmax_HF_reg_avx10_2(regF dst, regF src1, regF src2) predicate(VM_Version::supports_avx10_2()); match(Set dst (MaxHF src1 src2)); match(Set dst (MinHF src1 src2)); + format %{ "scalar_min_max_fp16 $dst, $src1, $src2" %} ins_encode %{ - int function = this->ideal_Opcode() == Op_MinHF ? 
AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN; - __ eminmaxsh($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, function); + int opcode = this->ideal_Opcode(); + __ sminmax_fp16_avx10_2(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, k0); %} ins_pipe( pipe_slow ); %} @@ -25341,11 +25385,12 @@ instruct scalar_minmax_HF_reg(regF dst, regF src1, regF src2, kReg ktmp, regF xt match(Set dst (MaxHF src1 src2)); match(Set dst (MinHF src1 src2)); effect(TEMP_DEF dst, TEMP ktmp, TEMP xtmp1, TEMP xtmp2); + format %{ "scalar_min_max_fp16 $dst, $src1, $src2\t using $ktmp, $xtmp1 and $xtmp2 as TEMP" %} ins_encode %{ int opcode = this->ideal_Opcode(); - __ scalar_max_min_fp16(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $ktmp$$KRegister, - $xtmp1$$XMMRegister, $xtmp2$$XMMRegister); + __ sminmax_fp16(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $ktmp$$KRegister, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister); %} ins_pipe( pipe_slow ); %} @@ -25445,8 +25490,9 @@ instruct vector_minmax_HF_mem_avx10_2(vec dst, vec src1, memory src2) format %{ "vector_min_max_fp16_mem $dst, $src1, $src2" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); - int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN; - __ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$Address, true, function, vlen_enc); + int opcode = this->ideal_Opcode(); + __ vminmax_fp16_avx10_2(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address, + k0, vlen_enc); %} ins_pipe( pipe_slow ); %} @@ -25459,8 +25505,9 @@ instruct vector_minmax_HF_reg_avx10_2(vec dst, vec src1, vec src2) format %{ "vector_min_max_fp16 $dst, $src1, $src2" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); - int function = this->ideal_Opcode() == Op_MinVHF ? 
AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN; - __ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$XMMRegister, true, function, vlen_enc); + int opcode = this->ideal_Opcode(); + __ vminmax_fp16_avx10_2(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, + k0, vlen_enc); %} ins_pipe( pipe_slow ); %} @@ -25475,8 +25522,8 @@ instruct vector_minmax_HF_reg(vec dst, vec src1, vec src2, kReg ktmp, vec xtmp1, ins_encode %{ int vlen_enc = vector_length_encoding(this); int opcode = this->ideal_Opcode(); - __ vector_max_min_fp16(opcode, $dst$$XMMRegister, $src2$$XMMRegister, $src1$$XMMRegister, $ktmp$$KRegister, - $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, vlen_enc); + __ vminmax_fp16(opcode, $dst$$XMMRegister, $src2$$XMMRegister, $src1$$XMMRegister, $ktmp$$KRegister, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, vlen_enc); %} ins_pipe( pipe_slow ); %} diff --git a/src/hotspot/cpu/zero/stubDeclarations_zero.hpp b/src/hotspot/cpu/zero/stubDeclarations_zero.hpp index 2357bbb5169..9abe313b3a7 100644 --- a/src/hotspot/cpu/zero/stubDeclarations_zero.hpp +++ b/src/hotspot/cpu/zero/stubDeclarations_zero.hpp @@ -29,35 +29,40 @@ #define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(preuniverse, 0) \ #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(initial, 0) \ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(continuation, 0) \ #define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(compiler, 0) \ #define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ do_arch_blob, \ do_arch_entry, 
\ - do_arch_entry_init) \ + do_arch_entry_init, \ + do_arch_entry_array) \ do_arch_blob(final, 0) \ diff --git a/src/hotspot/cpu/zero/stubGenerator_zero.cpp b/src/hotspot/cpu/zero/stubGenerator_zero.cpp index 08cb173b507..569a2fa8ca9 100644 --- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp +++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp @@ -213,7 +213,7 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData *stub_data) : StubCodeGenerator(code, blob_id, stub_data) { switch(blob_id) { case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); @@ -237,8 +237,8 @@ class StubGenerator: public StubCodeGenerator { } }; -void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { - StubGenerator g(code, blob_id); +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData *stub_data) { + StubGenerator g(code, blob_id, stub_data); } EntryFrame *EntryFrame::build(const intptr_t* parameters, diff --git a/src/hotspot/cpu/zero/stubRoutines_zero.cpp b/src/hotspot/cpu/zero/stubRoutines_zero.cpp index 9b53f09be5d..196907b061f 100644 --- a/src/hotspot/cpu/zero/stubRoutines_zero.cpp +++ b/src/hotspot/cpu/zero/stubRoutines_zero.cpp @@ -30,3 +30,9 @@ address StubRoutines::crc_table_addr() { ShouldNotCallThis(); return nullptr; } address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; } + +#if INCLUDE_CDS +// nothing to do for zero +void StubRoutines::init_AOTAddressTable() { +} +#endif // INCLUDE_CDS diff --git a/src/hotspot/os/aix/globals_aix.hpp b/src/hotspot/os/aix/globals_aix.hpp index 473d7759063..adc189666ef 100644 --- a/src/hotspot/os/aix/globals_aix.hpp +++ b/src/hotspot/os/aix/globals_aix.hpp @@ -37,16 +37,6 @@ range, \ constraint) \ \ - /* Whether to allow the VM to run if EXTSHM=ON. 
EXTSHM is an environment */ \ - /* variable used on AIX to activate certain hacks which allow more shm segments */\ - /* for 32bit processes. For 64bit processes, it is pointless and may have */ \ - /* harmful side effects (e.g. for some reasonn prevents allocation of 64k pages */\ - /* via shmctl). */ \ - /* Per default we quit with an error if that variable is found; for certain */ \ - /* customer scenarios, we may want to be able to run despite that variable. */ \ - product(bool, AllowExtshm, false, DIAGNOSTIC, \ - "Allow VM to run with EXTSHM=ON.") \ - \ /* Maximum expected size of the data segment. That correlates with the */ \ /* maximum C Heap consumption we expect. */ \ /* We need to leave "breathing space" for the data segment when */ \ diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp index af743dc7484..3cad24d388c 100644 --- a/src/hotspot/os/aix/os_aix.cpp +++ b/src/hotspot/os/aix/os_aix.cpp @@ -126,7 +126,6 @@ int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t); // for multipage initialization error analysis (in 'g_multipage_error') #define ERROR_MP_OS_TOO_OLD 100 -#define ERROR_MP_EXTSHM_ACTIVE 101 #define ERROR_MP_VMGETINFO_FAILED 102 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103 @@ -178,9 +177,6 @@ uint32_t os::Aix::_os_version = 0; // -1 = uninitialized, 0 - no, 1 - yes int os::Aix::_xpg_sus_mode = -1; -// -1 = uninitialized, 0 - no, 1 - yes -int os::Aix::_extshm = -1; - //////////////////////////////////////////////////////////////////////////////// // local variables @@ -1195,13 +1191,6 @@ void os::print_memory_info(outputStream* st) { const char* const ldr_cntrl = ::getenv("LDR_CNTRL"); st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : ""); - // Print out EXTSHM because it is an unsupported setting. - const char* const extshm = ::getenv("EXTSHM"); - st->print_cr(" EXTSHM=%s.", extshm ? 
extshm : ""); - if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) { - st->print_cr(" *** Unsupported! Please remove EXTSHM from your environment! ***"); - } - // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks. const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES"); st->print_cr(" AIXTHREAD_GUARDPAGES=%s.", @@ -2133,8 +2122,6 @@ void os::init(void) { // datapsize = 64k. Data segment, thread stacks are 64k paged. // This normally means that we can allocate 64k pages dynamically. - // (There is one special case where this may be false: EXTSHM=on. - // but we decided to not support that mode). assert0(g_multipage_support.can_use_64K_pages || g_multipage_support.can_use_64K_mmap_pages); set_page_size(64*K); @@ -2543,28 +2530,13 @@ void os::Aix::initialize_os_info() { void os::Aix::scan_environment() { char* p; - int rc; - // Warn explicitly if EXTSHM=ON is used. That switch changes how - // System V shared memory behaves. One effect is that page size of - // shared memory cannot be change dynamically, effectivly preventing - // large pages from working. - // This switch was needed on AIX 32bit, but on AIX 64bit the general - // recommendation is (in OSS notes) to switch it off. + // Reject EXTSHM=ON. That switch changes how System V shared memory behaves + // and prevents allocation of 64k pages for the heap. p = ::getenv("EXTSHM"); trcVerbose("EXTSHM=%s.", p ? p : ""); if (p && strcasecmp(p, "ON") == 0) { - _extshm = 1; - log_warning(os)("*** Unsupported mode! Please remove EXTSHM from your environment! ***"); - if (!AllowExtshm) { - // We allow under certain conditions the user to continue. However, we want this - // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means - // that the VM is not able to allocate 64k pages for the heap. - // We do not want to run with reduced performance. - vm_exit_during_initialization("EXTSHM is ON. 
Please remove EXTSHM from your environment."); - } - } else { - _extshm = 0; + vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment."); } // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs. @@ -2695,3 +2667,7 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {} void os::jfr_report_memory_info() {} #endif // INCLUDE_JFR + +void os::print_open_file_descriptors(outputStream* st) { + // File descriptor counting not implemented on AIX +} diff --git a/src/hotspot/os/aix/os_aix.hpp b/src/hotspot/os/aix/os_aix.hpp index a30e2077fc2..e21d2cf81bb 100644 --- a/src/hotspot/os/aix/os_aix.hpp +++ b/src/hotspot/os/aix/os_aix.hpp @@ -49,11 +49,6 @@ class os::Aix { // 1 - SPEC1170 requested (XPG_SUS_ENV is ON) static int _xpg_sus_mode; - // -1 = uninitialized, - // 0 - EXTSHM=OFF or not set - // 1 - EXTSHM=ON - static int _extshm; - static bool available_memory(physical_memory_size_type& value); static bool free_memory(physical_memory_size_type& value); static physical_memory_size_type physical_memory() { return _physical_memory; } @@ -111,12 +106,6 @@ class os::Aix { return _xpg_sus_mode; } - // Returns true if EXTSHM=ON. - static bool extshm() { - assert(_extshm != -1, "not initialized"); - return _extshm; - } - // result struct for get_meminfo() struct meminfo_t { diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp index b3f878fbfdd..f0527136d90 100644 --- a/src/hotspot/os/aix/porting_aix.cpp +++ b/src/hotspot/os/aix/porting_aix.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2024 SAP SE. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -426,6 +426,10 @@ int dladdr(void* addr, Dl_info* info) { } +int JVM_dladdr(void* addr, Dl_info* info) { + return dladdr(addr, info); +} + ///////////////////////////////////////////////////////////////////////////// // Native callstack dumping diff --git a/src/hotspot/os/aix/porting_aix.hpp b/src/hotspot/os/aix/porting_aix.hpp index a1a22d81471..0bd71079d0a 100644 --- a/src/hotspot/os/aix/porting_aix.hpp +++ b/src/hotspot/os/aix/porting_aix.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2024 SAP SE. All rights reserved. + * Copyright (c) 2012, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,25 +37,9 @@ // (see http://linux.die.net/man/3/dladdr) // dladdr(3) is not POSIX but a GNU extension, and is not available on AIX. // -// Differences between AIX dladdr and Linux dladdr: -// -// 1) Dl_info.dli_fbase: can never work, is disabled. -// A loaded image on AIX is divided in multiple segments, at least two -// (text and data) but potentially also far more. This is because the loader may -// load each member into an own segment, as for instance happens with the libC.a -// 2) Dl_info.dli_sname: This only works for code symbols (functions); for data, a -// zero-length string is returned (""). -// 3) Dl_info.dli_saddr: For code, this will return the entry point of the function, -// not the function descriptor. 
-typedef struct { - const char *dli_fname; // file path of loaded library - // void *dli_fbase; - const char *dli_sname; // symbol name; "" if not known - void *dli_saddr; // address of *entry* of function; not function descriptor; -} Dl_info; +#include "dl_info.h" -// Note: we export this to use it inside J2se too #ifdef __cplusplus extern "C" #endif diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp index 29ebe65e0db..a4d9a2197a5 100644 --- a/src/hotspot/os/bsd/os_bsd.cpp +++ b/src/hotspot/os/bsd/os_bsd.cpp @@ -76,6 +76,7 @@ # include # include # include +# include # include # include # include @@ -102,6 +103,7 @@ #endif #ifdef __APPLE__ + #include #include #include #endif @@ -2596,3 +2598,45 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) { return res; } // end: os::pd_dll_unload() + +void os::print_open_file_descriptors(outputStream* st) { +#ifdef __APPLE__ + char buf[1024 * sizeof(struct proc_fdinfo)]; + os::Bsd::print_open_file_descriptors(st, buf, sizeof(buf)); +#else + st->print_cr("Open File Descriptors: unknown"); +#endif +} + +void os::Bsd::print_open_file_descriptors(outputStream* st, char* buf, size_t buflen) { +#ifdef __APPLE__ + pid_t my_pid; + + // ensure the scratch buffer is big enough for at least one FD info struct + precond(buflen >= sizeof(struct proc_fdinfo)); + kern_return_t kres = pid_for_task(mach_task_self(), &my_pid); + if (kres != KERN_SUCCESS) { + st->print_cr("Open File Descriptors: unknown"); + return; + } + size_t max_fds = buflen / sizeof(struct proc_fdinfo); + struct proc_fdinfo* fds = reinterpret_cast(buf); + + // fill our buffer with FD info, up to the available buffer size + int res = proc_pidinfo(my_pid, PROC_PIDLISTFDS, 0, fds, max_fds * sizeof(struct proc_fdinfo)); + if (res <= 0) { + st->print_cr("Open File Descriptors: unknown"); + return; + } + + // print lower threshold if count exceeds buffer size + int nfiles = res / sizeof(struct proc_fdinfo); + if ((size_t)nfiles >= 
max_fds) { + st->print_cr("Open File Descriptors: > %zu", max_fds); + return; + } + st->print_cr("Open File Descriptors: %d", nfiles); +#else + st->print_cr("Open File Descriptors: unknown"); +#endif +} diff --git a/src/hotspot/os/bsd/os_bsd.hpp b/src/hotspot/os/bsd/os_bsd.hpp index da73211b9a7..e87a680b2d2 100644 --- a/src/hotspot/os/bsd/os_bsd.hpp +++ b/src/hotspot/os/bsd/os_bsd.hpp @@ -123,6 +123,8 @@ class os::Bsd { static int get_node_by_cpu(int cpu_id); static void print_uptime_info(outputStream* st); + static void print_open_file_descriptors(outputStream* st, char* buf, size_t buflen); + static void print_open_file_descriptors(outputStream* st); }; #endif // OS_BSD_OS_BSD_HPP diff --git a/src/hotspot/os/bsd/semaphore_bsd.cpp b/src/hotspot/os/bsd/semaphore_bsd.cpp index 827c955677e..c35712ff2da 100644 --- a/src/hotspot/os/bsd/semaphore_bsd.cpp +++ b/src/hotspot/os/bsd/semaphore_bsd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -81,27 +81,37 @@ bool OSXSemaphore::timedwait(int64_t millis) { // kernel semaphores take a relative timeout mach_timespec_t waitspec; - int secs = millis / MILLIUNITS; - int nsecs = millis_to_nanos(millis % MILLIUNITS); - waitspec.tv_sec = secs; - waitspec.tv_nsec = nsecs; + int64_t starttime; + const bool is_trywait = millis == 0; - int64_t starttime = os::javaTimeNanos(); + if (!is_trywait) { + int secs = millis / MILLIUNITS; + int nsecs = millis_to_nanos(millis % MILLIUNITS); + waitspec.tv_sec = secs; + waitspec.tv_nsec = nsecs; + + starttime = os::javaTimeNanos(); + } else { + waitspec.tv_sec = 0; + waitspec.tv_nsec = 0; + } kr = semaphore_timedwait(_semaphore, waitspec); while (kr == KERN_ABORTED) { - // reduce the timeout and try again - int64_t totalwait = millis_to_nanos(millis); - int64_t current = os::javaTimeNanos(); - int64_t passedtime = current - starttime; + if (!is_trywait) { + // reduce the timeout and try again + int64_t totalwait = millis_to_nanos(millis); + int64_t current = os::javaTimeNanos(); + int64_t passedtime = current - starttime; - if (passedtime >= totalwait) { - waitspec.tv_sec = 0; - waitspec.tv_nsec = 0; - } else { - int64_t waittime = totalwait - (current - starttime); - waitspec.tv_sec = waittime / NANOSECS_PER_SEC; - waitspec.tv_nsec = waittime % NANOSECS_PER_SEC; + if (passedtime >= totalwait) { + waitspec.tv_sec = 0; + waitspec.tv_nsec = 0; + } else { + int64_t waittime = totalwait - (current - starttime); + waitspec.tv_sec = waittime / NANOSECS_PER_SEC; + waitspec.tv_nsec = waittime % NANOSECS_PER_SEC; + } } kr = semaphore_timedwait(_semaphore, waitspec); diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp index e49d070890e..4a2d75ecdf3 100644 --- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp +++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp @@ -28,7 +28,6 @@ #include 
"cgroupV2Subsystem_linux.hpp" #include "logging/log.hpp" #include "memory/allocation.hpp" -#include "os_linux.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" @@ -41,6 +40,8 @@ // Inlined from for portability. #ifndef CGROUP2_SUPER_MAGIC # define CGROUP2_SUPER_MAGIC 0x63677270 +#else + STATIC_ASSERT(CGROUP2_SUPER_MAGIC == 0x63677270); #endif // controller names have to match the *_IDX indices @@ -605,6 +606,11 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) { } } +void CgroupSubsystem::adjust_controllers(physical_memory_size_type upper_mem_bound, int upper_cpu_bound) { + CgroupUtil::adjust_controller(memory_controller()->controller(), upper_mem_bound); + CgroupUtil::adjust_controller(cpu_controller()->controller(), upper_cpu_bound); +} + /* active_processor_count * * Calculate an appropriate number of active processors for the @@ -631,7 +637,7 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) { * return: * true if there were no errors. false otherwise. */ -bool CgroupSubsystem::active_processor_count(double& value) { +bool CgroupSubsystem::active_processor_count(int (*cpu_bound_func)(), double& value) { // We use a cache with a timeout to avoid performing expensive // computations in the event this function is called frequently. // [See 8227006]. 
@@ -643,7 +649,7 @@ bool CgroupSubsystem::active_processor_count(double& value) { return true; } - int cpu_count = os::Linux::active_processor_count(); + int cpu_count = cpu_bound_func(); double result = -1; if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) { return false; diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp index d083a9985c2..adde37e1c77 100644 --- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp +++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp @@ -278,7 +278,7 @@ class CgroupMemoryController: public CHeapObj { class CgroupSubsystem: public CHeapObj { public: bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value); - bool active_processor_count(double& value); + bool active_processor_count(int (*cpu_bound_func)(), double& value); virtual bool pids_max(uint64_t& value) = 0; virtual bool pids_current(uint64_t& value) = 0; @@ -291,6 +291,8 @@ class CgroupSubsystem: public CHeapObj { virtual CachingCgroupController* cpu_controller() = 0; virtual CgroupCpuacctController* cpuacct_controller() = 0; + void adjust_controllers(physical_memory_size_type upper_mem_bound, int upper_cpu_bound); + bool cpu_quota(int& value); bool cpu_period(int& value); bool cpu_shares(int& value); diff --git a/src/hotspot/os/linux/cgroupUtil_linux.cpp b/src/hotspot/os/linux/cgroupUtil_linux.cpp index 570b335940b..f166f6cd5e4 100644 --- a/src/hotspot/os/linux/cgroupUtil_linux.cpp +++ b/src/hotspot/os/linux/cgroupUtil_linux.cpp @@ -24,7 +24,6 @@ */ #include "cgroupUtil_linux.hpp" -#include "os_linux.hpp" bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) { assert(upper_bound > 0, "upper bound of cpus must be positive"); @@ -82,7 +81,7 @@ double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu, return lowest; } -void CgroupUtil::adjust_controller(CgroupMemoryController* mem) { +void 
CgroupUtil::adjust_controller(CgroupMemoryController* mem, physical_memory_size_type upper_bound) { assert(mem->cgroup_path() != nullptr, "invariant"); if (strstr(mem->cgroup_path(), "../") != nullptr) { log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved " @@ -100,17 +99,16 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) { char* cg_path = os::strdup(orig); char* last_slash; assert(cg_path[0] == '/', "cgroup path must start with '/'"); - physical_memory_size_type phys_mem = os::Linux::physical_memory(); char* limit_cg_path = nullptr; physical_memory_size_type limit = value_unlimited; - physical_memory_size_type lowest_limit = phys_mem; - lowest_limit = get_updated_mem_limit(mem, lowest_limit, phys_mem); - physical_memory_size_type orig_limit = lowest_limit != phys_mem ? lowest_limit : phys_mem; + physical_memory_size_type lowest_limit = upper_bound; + lowest_limit = get_updated_mem_limit(mem, lowest_limit, upper_bound); + physical_memory_size_type orig_limit = lowest_limit != upper_bound ? 
lowest_limit : upper_bound; while ((last_slash = strrchr(cg_path, '/')) != cg_path) { *last_slash = '\0'; // strip path // update to shortened path and try again mem->set_subsystem_path(cg_path); - limit = get_updated_mem_limit(mem, lowest_limit, phys_mem); + limit = get_updated_mem_limit(mem, lowest_limit, upper_bound); if (limit < lowest_limit) { lowest_limit = limit; os::free(limit_cg_path); // handles nullptr @@ -119,13 +117,13 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) { } // need to check limit at mount point mem->set_subsystem_path("/"); - limit = get_updated_mem_limit(mem, lowest_limit, phys_mem); + limit = get_updated_mem_limit(mem, lowest_limit, upper_bound); if (limit < lowest_limit) { lowest_limit = limit; os::free(limit_cg_path); // handles nullptr limit_cg_path = os::strdup("/"); } - assert(lowest_limit <= phys_mem, "limit must not exceed host memory"); + assert(lowest_limit <= upper_bound, "limit must not exceed upper bound"); if (lowest_limit != orig_limit) { // we've found a lower limit anywhere in the hierarchy, // set the path to the limit path @@ -147,7 +145,7 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) { os::free(limit_cg_path); } -void CgroupUtil::adjust_controller(CgroupCpuController* cpu) { +void CgroupUtil::adjust_controller(CgroupCpuController* cpu, int upper_bound) { assert(cpu->cgroup_path() != nullptr, "invariant"); if (strstr(cpu->cgroup_path(), "../") != nullptr) { log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved " @@ -165,17 +163,16 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) { char* cg_path = os::strdup(orig); char* last_slash; assert(cg_path[0] == '/', "cgroup path must start with '/'"); - int host_cpus = os::Linux::active_processor_count(); - int lowest_limit = host_cpus; - double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus); - int orig_limit = lowest_limit != host_cpus ? 
lowest_limit : host_cpus; + int lowest_limit = upper_bound; + double cpus = get_updated_cpu_limit(cpu, lowest_limit, upper_bound); + int orig_limit = lowest_limit != upper_bound ? lowest_limit : upper_bound; char* limit_cg_path = nullptr; while ((last_slash = strrchr(cg_path, '/')) != cg_path) { *last_slash = '\0'; // strip path // update to shortened path and try again cpu->set_subsystem_path(cg_path); - cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus); - if (cpus != host_cpus && cpus < lowest_limit) { + cpus = get_updated_cpu_limit(cpu, lowest_limit, upper_bound); + if (cpus != upper_bound && cpus < lowest_limit) { lowest_limit = cpus; os::free(limit_cg_path); // handles nullptr limit_cg_path = os::strdup(cg_path); @@ -183,8 +180,8 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) { } // need to check limit at mount point cpu->set_subsystem_path("/"); - cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus); - if (cpus != host_cpus && cpus < lowest_limit) { + cpus = get_updated_cpu_limit(cpu, lowest_limit, upper_bound); + if (cpus != upper_bound && cpus < lowest_limit) { lowest_limit = cpus; os::free(limit_cg_path); // handles nullptr limit_cg_path = os::strdup(cg_path); diff --git a/src/hotspot/os/linux/cgroupUtil_linux.hpp b/src/hotspot/os/linux/cgroupUtil_linux.hpp index 1fd2a7d872b..68585c22c2d 100644 --- a/src/hotspot/os/linux/cgroupUtil_linux.hpp +++ b/src/hotspot/os/linux/cgroupUtil_linux.hpp @@ -35,10 +35,10 @@ class CgroupUtil: AllStatic { static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value); // Given a memory controller, adjust its path to a point in the hierarchy // that represents the closest memory limit. - static void adjust_controller(CgroupMemoryController* m); + static void adjust_controller(CgroupMemoryController* m, physical_memory_size_type upper_bound); // Given a cpu controller, adjust its path to a point in the hierarchy // that represents the closest cpu limit. 
- static void adjust_controller(CgroupCpuController* c); + static void adjust_controller(CgroupCpuController* c, int upper_bound); private: static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m, physical_memory_size_type lowest, diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp index c8f5a290c99..e42b7a13391 100644 --- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp +++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp @@ -326,8 +326,6 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset, _cpuset(cpuset), _cpuacct(cpuacct), _pids(pids) { - CgroupUtil::adjust_controller(memory); - CgroupUtil::adjust_controller(cpu); _memory = new CachingCgroupController(memory); _cpu = new CachingCgroupController(cpu); } diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp index 30e1affc646..edd80bb7427 100644 --- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp +++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp @@ -154,8 +154,6 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory, CgroupV2CpuacctController* cpuacct, CgroupV2Controller unified) : _unified(unified) { - CgroupUtil::adjust_controller(memory); - CgroupUtil::adjust_controller(cpu); _memory = new CachingCgroupController(memory); _cpu = new CachingCgroupController(cpu); _cpuacct = cpuacct; diff --git a/src/hotspot/os/linux/hugepages.cpp b/src/hotspot/os/linux/hugepages.cpp index 5472c093d3f..b065f7b1496 100644 --- a/src/hotspot/os/linux/hugepages.cpp +++ b/src/hotspot/os/linux/hugepages.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2024, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -35,11 +35,16 @@ #include ExplicitHugePageSupport::ExplicitHugePageSupport() : - _initialized(false), _pagesizes(), _default_hugepage_size(SIZE_MAX), _inconsistent(false) {} + _initialized{false}, _os_supported{}, _pre_allocated{}, _default_hugepage_size{0}, _inconsistent{false} {} -os::PageSizes ExplicitHugePageSupport::pagesizes() const { +os::PageSizes ExplicitHugePageSupport::os_supported() const { assert(_initialized, "Not initialized"); - return _pagesizes; + return _os_supported; +} + +os::PageSizes ExplicitHugePageSupport::pre_allocated() const { + assert(_initialized, "Not initialized"); + return _pre_allocated; } size_t ExplicitHugePageSupport::default_hugepage_size() const { @@ -63,7 +68,7 @@ static size_t scan_default_hugepagesize() { // format has been changed), we'll set largest page size to 0 FILE *fp = os::fopen("/proc/meminfo", "r"); - if (fp) { + if (fp != nullptr) { while (!feof(fp)) { int x = 0; char buf[16]; @@ -76,7 +81,7 @@ static size_t scan_default_hugepagesize() { // skip to next line for (;;) { int ch = fgetc(fp); - if (ch == EOF || ch == (int)'\n') break; + if (ch == EOF || ch == '\n') break; } } } @@ -129,10 +134,24 @@ static os::PageSizes scan_hugepages() { return pagesizes; } +static os::PageSizes filter_pre_allocated_hugepages(os::PageSizes pagesizes) { + os::PageSizes pre_allocated{}; + char filename[PATH_MAX]; + for (size_t ps = pagesizes.smallest(); ps != 0; ps = pagesizes.next_larger(ps)) { + os::snprintf_checked(filename, sizeof(filename), "%s/hugepages-%zukB/nr_hugepages", sys_hugepages, ps / K); + size_t pages; + bool read_success = read_number_file(filename, &pages); + if (read_success && pages > 0) { + pre_allocated.add(ps); + } + } + return pre_allocated; +} + void ExplicitHugePageSupport::print_on(outputStream* os) { if (_initialized) { os->print_cr("Explicit hugepage support:"); - for (size_t s = _pagesizes.smallest(); s != 0; s = _pagesizes.next_larger(s)) { + for (size_t s = _os_supported.smallest(); s != 0; s = 
_os_supported.next_larger(s)) { os->print_cr(" hugepage size: " EXACTFMT, EXACTFMTARGS(s)); } os->print_cr(" default hugepage size: " EXACTFMT, EXACTFMTARGS(_default_hugepage_size)); @@ -147,14 +166,15 @@ void ExplicitHugePageSupport::print_on(outputStream* os) { void ExplicitHugePageSupport::scan_os() { _default_hugepage_size = scan_default_hugepagesize(); if (_default_hugepage_size > 0) { - _pagesizes = scan_hugepages(); + _os_supported = scan_hugepages(); + _pre_allocated = filter_pre_allocated_hugepages(_os_supported); // See https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt: /proc/meminfo should match // /sys/kernel/mm/hugepages/hugepages-xxxx. However, we may run on a broken kernel (e.g. on WSL) // that only exposes /proc/meminfo but not /sys/kernel/mm/hugepages. In that case, we are not // sure about the state of hugepage support by the kernel, so we won't use explicit hugepages. - if (!_pagesizes.contains(_default_hugepage_size)) { + if (!_os_supported.contains(_default_hugepage_size)) { log_info(pagesize)("Unexpected configuration: default pagesize (%zu) " - "has no associated directory in /sys/kernel/mm/hugepages..", _default_hugepage_size); + "has no associated directory in /sys/kernel/mm/hugepages.", _default_hugepage_size); _inconsistent = true; } } @@ -167,7 +187,7 @@ void ExplicitHugePageSupport::scan_os() { } THPSupport::THPSupport() : - _initialized(false), _mode(THPMode::never), _pagesize(SIZE_MAX) {} + _initialized{false}, _mode{THPMode::never}, _pagesize{0} {} THPMode THPSupport::mode() const { @@ -201,7 +221,6 @@ void THPSupport::scan_os() { } // Scan large page size for THP from hpage_pmd_size - _pagesize = 0; if (read_number_file("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", &_pagesize)) { assert(_pagesize > 0, "Expected"); } diff --git a/src/hotspot/os/linux/hugepages.hpp b/src/hotspot/os/linux/hugepages.hpp index efd27c55fd6..5a9767b4ff8 100644 --- a/src/hotspot/os/linux/hugepages.hpp +++ 
b/src/hotspot/os/linux/hugepages.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2024, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -45,7 +45,10 @@ class ExplicitHugePageSupport { // All supported hugepage sizes (sizes for which entries exist // in /sys/kernel/mm/hugepages/hugepage-xxx) - os::PageSizes _pagesizes; + os::PageSizes _os_supported; + + // Above pages filtered for where the contents of file nr_hugepages was larger than zero + os::PageSizes _pre_allocated; // Contains the default hugepage. The "default hugepage size" is the one that // - is marked in /proc/meminfo as "Hugepagesize" @@ -60,7 +63,8 @@ public: void scan_os(); - os::PageSizes pagesizes() const; + os::PageSizes os_supported() const; + os::PageSizes pre_allocated() const; size_t default_hugepage_size() const; void print_on(outputStream* os); diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp index b46263efd99..da2cbf381e6 100644 --- a/src/hotspot/os/linux/osContainer_linux.cpp +++ b/src/hotspot/os/linux/osContainer_linux.cpp @@ -59,6 +59,11 @@ void OSContainer::init() { if (cgroup_subsystem == nullptr) { return; // Required subsystem files not found or other error } + // Adjust controller paths once subsystem is initialized + physical_memory_size_type phys_mem = os::Linux::physical_memory(); + int host_cpus = os::Linux::active_processor_count(); + cgroup_subsystem->adjust_controllers(phys_mem, host_cpus); + /* * In order to avoid a false positive on is_containerized() on * Linux systems outside a container *and* to ensure compatibility @@ -252,7 +257,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() { bool OSContainer::active_processor_count(double& value) { assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); - 
return cgroup_subsystem->active_processor_count(value); + return cgroup_subsystem->active_processor_count(&os::Linux::active_processor_count, value); } bool OSContainer::cpu_quota(int& value) { diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 9c2fbab7535..a87c0ab33fa 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -83,6 +83,7 @@ #endif # include +# include # include # include # include @@ -113,6 +114,7 @@ # include # include # include +# include # include #ifdef __GLIBC__ # include @@ -1311,7 +1313,7 @@ bool os::is_primordial_thread(void) { // Find the virtual memory area that contains addr static bool find_vma(address addr, address* vma_low, address* vma_high) { FILE *fp = os::fopen("/proc/self/maps", "r"); - if (fp) { + if (fp != nullptr) { address low, high; while (!feof(fp)) { if (fscanf(fp, "%p-%p", &low, &high) == 2) { @@ -1324,7 +1326,7 @@ static bool find_vma(address addr, address* vma_low, address* vma_high) { } for (;;) { int ch = fgetc(fp); - if (ch == EOF || ch == (int)'\n') break; + if (ch == EOF || ch == '\n') break; } } fclose(fp); @@ -3814,8 +3816,8 @@ static int hugetlbfs_page_size_flag(size_t page_size) { } static bool hugetlbfs_sanity_check(size_t page_size) { - const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes(); - assert(page_sizes.contains(page_size), "Invalid page sizes passed"); + const os::PageSizes os_supported = HugePages::explicit_hugepage_info().os_supported(); + assert(os_supported.contains(page_size), "Invalid page sizes passed (%zu)", page_size); // Include the page size flag to ensure we sanity check the correct page size. 
int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size); @@ -3829,16 +3831,16 @@ static bool hugetlbfs_sanity_check(size_t page_size) { log_info(pagesize)("Large page size (" EXACTFMT ") failed sanity check, " "checking if smaller large page sizes are usable", EXACTFMTARGS(page_size)); - for (size_t page_size_ = page_sizes.next_smaller(page_size); - page_size_ > os::vm_page_size(); - page_size_ = page_sizes.next_smaller(page_size_)) { - flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_); - p = mmap(nullptr, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0); + for (size_t size = os_supported.next_smaller(page_size); + size > os::vm_page_size(); + size = os_supported.next_smaller(size)) { + flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(size); + p = mmap(nullptr, size, PROT_READ|PROT_WRITE, flags, -1, 0); if (p != MAP_FAILED) { // Mapping succeeded, sanity check passed. - munmap(p, page_size_); + munmap(p, size); log_info(pagesize)("Large page size (" EXACTFMT ") passed sanity check", - EXACTFMTARGS(page_size_)); + EXACTFMTARGS(size)); return true; } } @@ -4020,7 +4022,7 @@ void os::Linux::large_page_init() { // - os::large_page_size() is the default explicit hugepage size (/proc/meminfo "Hugepagesize") // - os::pagesizes() contains all hugepage sizes the kernel supports, regardless whether there // are pages configured in the pool or not (from /sys/kernel/hugepages/hugepage-xxxx ...) - os::PageSizes all_large_pages = HugePages::explicit_hugepage_info().pagesizes(); + os::PageSizes all_large_pages = HugePages::explicit_hugepage_info().os_supported(); const size_t default_large_page_size = HugePages::default_explicit_hugepage_size(); // 3) Consistency check and post-processing @@ -4062,10 +4064,10 @@ void os::Linux::large_page_init() { _large_page_size = large_page_size; - // Populate _page_sizes with large page sizes less than or equal to - // _large_page_size. 
- for (size_t page_size = _large_page_size; page_size != 0; - page_size = all_large_pages.next_smaller(page_size)) { + // Populate _page_sizes with _large_page_size (default large page size) even if not pre-allocated. + // Then, populate _page_sizes with all smaller large page sizes that have been pre-allocated. + os::PageSizes pre_allocated = HugePages::explicit_hugepage_info().pre_allocated(); + for (size_t page_size = _large_page_size; page_size != 0; page_size = pre_allocated.next_smaller(page_size)) { _page_sizes.add(page_size); } } @@ -4129,12 +4131,12 @@ static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t page_size, char* req_addr, bool exec) { - const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes(); + const os::PageSizes os_supported = HugePages::explicit_hugepage_info().os_supported(); assert(UseLargePages, "only for Huge TLBFS large pages"); assert(is_aligned(req_addr, alignment), "Must be"); assert(is_aligned(req_addr, page_size), "Must be"); assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be"); - assert(page_sizes.contains(page_size), "Must be a valid page size"); + assert(os_supported.contains(page_size), "Must be a valid page size"); assert(page_size > os::vm_page_size(), "Must be a large page size"); assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes"); @@ -4380,7 +4382,7 @@ int os::Linux::get_namespace_pid(int vmid) { os::snprintf_checked(fname, sizeof(fname), "/proc/%d/status", vmid); FILE *fp = os::fopen(fname, "r"); - if (fp) { + if (fp != nullptr) { int pid, nspid; int ret; while (!feof(fp) && !ferror(fp)) { @@ -4394,7 +4396,7 @@ int os::Linux::get_namespace_pid(int vmid) { } for (;;) { int ch = fgetc(fp); - if (ch == EOF || ch == (int)'\n') break; + if (ch == EOF || ch == '\n') break; } } fclose(fp); @@ -4549,6 +4551,7 @@ void os::Linux::numa_init() { FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true); } +#if INCLUDE_PARALLELGC if (UseParallelGC && UseNUMA 
&& UseLargePages && !can_commit_large_page_memory()) { // With static large pages we cannot uncommit a page, so there's no way // we can make the adaptive lgrp chunk resizing work. If the user specified both @@ -4560,6 +4563,7 @@ void os::Linux::numa_init() { UseAdaptiveNUMAChunkSizing = false; } } +#endif } void os::Linux::disable_numa(const char* reason, bool warning) { @@ -5427,3 +5431,31 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) { return res; } // end: os::pd_dll_unload() + +void os::print_open_file_descriptors(outputStream* st) { + DIR* dirp = opendir("/proc/self/fd"); + int fds = 0; + struct dirent* dentp; + const jlong TIMEOUT_NS = 50000000L; // 50 ms in nanoseconds + bool timed_out = false; + + // limit proc file read to 50ms + jlong start = os::javaTimeNanos(); + assert(dirp != nullptr, "No proc fs?"); + while ((dentp = readdir(dirp)) != nullptr && !timed_out) { + if (isdigit(dentp->d_name[0])) fds++; + if (fds % 100 == 0) { + jlong now = os::javaTimeNanos(); + if ((now - start) > TIMEOUT_NS) { + timed_out = true; + } + } + } + + closedir(dirp); + if (timed_out) { + st->print_cr("Open File Descriptors: > %d", fds); + } else { + st->print_cr("Open File Descriptors: %d", fds); + } +} diff --git a/src/hotspot/os/posix/dtrace/hotspot_jni.d b/src/hotspot/os/posix/dtrace/hotspot_jni.d index c5676921b37..1937769dcb2 100644 --- a/src/hotspot/os/posix/dtrace/hotspot_jni.d +++ b/src/hotspot/os/posix/dtrace/hotspot_jni.d @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -366,6 +366,8 @@ provider hotspot_jni { probe IsInstanceOf__return(uintptr_t); probe IsSameObject__entry(void*, void*, void*); probe IsSameObject__return(uintptr_t); + probe IsVirtualThread__entry(void*, void*); + probe IsVirtualThread__return(uintptr_t); probe MonitorEnter__entry(void*, void*); probe MonitorEnter__return(uint32_t); probe MonitorExit__entry(void*, void*); diff --git a/src/hotspot/os/posix/include/jvm_md.h b/src/hotspot/os/posix/include/jvm_md.h index eb8e1f0d7e9..061ef17aaae 100644 --- a/src/hotspot/os/posix/include/jvm_md.h +++ b/src/hotspot/os/posix/include/jvm_md.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,6 +62,19 @@ #define JVM_X_OK X_OK #define JVM_F_OK F_OK +#if defined(AIX) +#include "jni_md.h" +#include "dl_info.h" + +#ifdef __cplusplus +extern "C" { +#endif +JNIEXPORT int JVM_dladdr(void* addr, Dl_info* info); +#ifdef __cplusplus +} +#endif +#endif + /* * File I/O */ diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp index f147ed4be93..1fb2a248bec 100644 --- a/src/hotspot/os/posix/os_posix.cpp +++ b/src/hotspot/os/posix/os_posix.cpp @@ -888,6 +888,14 @@ void* os::lookup_function(const char* name) { return dlsym(RTLD_DEFAULT, name); } +int64_t os::ftell(FILE* file) { + return ::ftell(file); +} + +int os::fseek(FILE* file, int64_t offset, int whence) { + return ::fseek(file, offset, whence); +} + jlong os::lseek(int fd, jlong offset, int whence) { return (jlong) ::lseek(fd, offset, whence); } diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp index d9bde6fa825..c5046797e02 100644 --- 
a/src/hotspot/os/posix/perfMemory_posix.cpp +++ b/src/hotspot/os/posix/perfMemory_posix.cpp @@ -701,6 +701,39 @@ static void remove_file(const char* path) { } } +// Files newer than this threshold are considered to belong to a JVM that may +// still be starting up and are therefore not candidates for stale-file +// cleanup. This avoids racing a concurrent JVM startup while scanning the +// hsperfdata directory. +static const time_t cleanup_grace_period_seconds = 5; + +static bool is_cleanup_candidate(const char* filename, const char* dirname) { + struct stat statbuf; + int result; + + RESTARTABLE(::lstat(filename, &statbuf), result); + if (result == OS_ERR) { + log_debug(perf, memops)("lstat failed for %s/%s: %s", dirname, filename, os::strerror(errno)); + return false; + } + + if (!S_ISREG(statbuf.st_mode)) { + return false; + } + + const time_t now = time(nullptr); + if (now == (time_t)-1) { + return false; + } + + if (statbuf.st_mtime >= now - cleanup_grace_period_seconds) { + log_debug(perf, memops)("Skip cleanup of fresh file %s/%s", dirname, filename); + return false; + } + + return true; +} + // cleanup stale shared memory files // // This method attempts to remove all stale shared memory files in @@ -744,6 +777,11 @@ static void cleanup_sharedmem_files(const char* dirname) { continue; } + if (!is_cleanup_candidate(filename, dirname)) { + errno = 0; + continue; + } + #if defined(LINUX) // Special case on Linux, if multiple containers share the // same /tmp directory: @@ -872,16 +910,56 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size return -1; } - // Open the filename in the current directory. - // Cannot use O_TRUNC here; truncation of an existing file has to happen - // after the is_file_secure() check below. 
- int fd; - RESTARTABLE(os::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IRUSR|S_IWUSR), fd); + int fd = OS_ERR; + static const int create_sharedmem_file_retry_count = LINUX_ONLY(3) NOT_LINUX(1); + for (int attempt = 0; attempt < create_sharedmem_file_retry_count; attempt++) { + // Open the filename in the current directory. + // Use O_EXCL so that startup never reuses an existing pid file unless it + // has first been proven stale and removed in `cleanup_sharedmem_files`. + RESTARTABLE(os::open(filename, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, S_IRUSR|S_IWUSR), fd); + if (fd == OS_ERR) { + break; + } + +#if defined(LINUX) + // On Linux, different containerized processes that share the same /tmp + // directory (e.g., with "docker --volume ...") may have the same pid and + // try to use the same file. To avoid conflicts among such processes, we + // allow only one of them (the winner of the flock() call) to write to the + // file. If we lose the race, assume we may have collided with a concurrent + // scavenger briefly holding the lock on a fresh file and retry a few times + // before giving up. + int n; + RESTARTABLE(::flock(fd, LOCK_EX|LOCK_NB), n); + if (n == 0) { + break; + } + + const int flock_errno = errno; + ::close(fd); + fd = OS_ERR; + + if (attempt + 1 == create_sharedmem_file_retry_count || flock_errno != EWOULDBLOCK) { + log_warning(perf, memops)("Cannot use file %s/%s because %s (errno = %d)", dirname, filename, + (flock_errno == EWOULDBLOCK) ? + "it is locked by another process" : + "flock() failed", flock_errno); + errno = flock_errno; + break; + } + + // Short sleep to allow the lock to free up. 
+ os::naked_short_sleep(1); +#endif + } + if (fd == OS_ERR) { if (log_is_enabled(Debug, perf)) { LogStreamHandle(Debug, perf) log; if (errno == ELOOP) { log.print_cr("file %s is a symlink and is not secure", filename); + } else if (errno == EEXIST) { + log.print_cr("could not create file %s: existing file is not provably stale", filename); } else { log.print_cr("could not create file %s: %s", filename, os::strerror(errno)); } @@ -901,27 +979,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size } #if defined(LINUX) - // On Linux, different containerized processes that share the same /tmp - // directory (e.g., with "docker --volume ...") may have the same pid and - // try to use the same file. To avoid conflicts among such - // processes, we allow only one of them (the winner of the flock() call) - // to write to the file. All the other processes will give up and will - // have perfdata disabled. - // - // Note that the flock will be automatically given up when the winner - // process exits. - // - // The locking protocol works only with other JVMs that have the JDK-8286030 - // fix. If you are sharing the /tmp difrectory among different containers, - // do not use older JVMs that don't have this fix, or the behavior is undefined. - int n; - RESTARTABLE(::flock(fd, LOCK_EX|LOCK_NB), n); - if (n != 0) { - log_warning(perf, memops)("Cannot use file %s/%s because %s (errno = %d)", dirname, filename, - (errno == EWOULDBLOCK) ? - "it is locked by another process" : - "flock() failed", errno); - ::close(fd); + if (fd == OS_ERR) { return -1; } #endif @@ -1084,18 +1142,9 @@ static char* mmap_create_shared(size_t size) { // release a named shared memory region that was mmap-ed. 
// static void unmap_shared(char* addr, size_t bytes) { - int res; - if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; - res = ::munmap(addr, bytes); - if (res == 0) { - MemTracker::record_virtual_memory_release(addr, bytes); - } - } else { - res = ::munmap(addr, bytes); - } - if (res != 0) { - log_info(os)("os::release_memory failed (" PTR_FORMAT ", %zu)", p2i(addr), bytes); + MemTracker::record_virtual_memory_release(addr, bytes); + if (::munmap(addr, bytes) != 0) { + fatal("os::release_memory failed (" PTR_FORMAT ", %zu)", p2i(addr), bytes); } } diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 76f47640e5a..9d8fb45f0d1 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -2528,12 +2528,6 @@ LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, return EXCEPTION_CONTINUE_EXECUTION; } - -// Used for PostMortemDump -extern "C" void safepoints(); -extern "C" void find(int x); -extern "C" void events(); - // According to Windows API documentation, an illegal instruction sequence should generate // the 0xC000001C exception code. However, real world experience shows that occasionnaly // the execution of an illegal instruction can generate the exception code 0xC000001E. This @@ -5114,6 +5108,13 @@ jlong os::seek_to_file_offset(int fd, jlong offset) { return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); } +int64_t os::ftell(FILE* file) { + return ::_ftelli64(file); +} + +int os::fseek(FILE* file, int64_t offset, int whence) { + return ::_fseeki64(file,offset, whence); +} jlong os::lseek(int fd, jlong offset, int whence) { return (jlong) ::_lseeki64(fd, offset, whence); @@ -6276,6 +6277,10 @@ const void* os::get_saved_assert_context(const void** sigInfo) { return nullptr; } +void os::print_open_file_descriptors(outputStream* st) { + // File descriptor counting not supported on Windows. 
+} + /* * Windows/x64 does not use stack frames the way expected by Java: * [1] in most cases, there is no frame pointer. All locals are addressed via RSP diff --git a/src/hotspot/os/windows/perfMemory_windows.cpp b/src/hotspot/os/windows/perfMemory_windows.cpp index f54a2b52cca..dad2804f18a 100644 --- a/src/hotspot/os/windows/perfMemory_windows.cpp +++ b/src/hotspot/os/windows/perfMemory_windows.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1682,12 +1682,7 @@ void PerfMemory::detach(char* addr, size_t bytes) { return; } - if (MemTracker::enabled()) { - // it does not go through os api, the operation has to record from here - MemTracker::NmtVirtualMemoryLocker nvml; - remove_file_mapping(addr); - MemTracker::record_virtual_memory_release(addr, bytes); - } else { - remove_file_mapping(addr); - } + // it does not go through os api, the operation has to record from here + MemTracker::record_virtual_memory_release(addr, bytes); + remove_file_mapping(addr); } diff --git a/src/hotspot/share/cds/aotGrowableArray.cpp b/src/hotspot/os_cpu/aix_ppc/vm_version_aix_ppc.cpp similarity index 78% rename from src/hotspot/share/cds/aotGrowableArray.cpp rename to src/hotspot/os_cpu/aix_ppc/vm_version_aix_ppc.cpp index ec63e7aa57f..8cc8b715201 100644 --- a/src/hotspot/share/cds/aotGrowableArray.cpp +++ b/src/hotspot/os_cpu/aix_ppc/vm_version_aix_ppc.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,13 +23,14 @@ * */ -#include "cds/aotGrowableArray.hpp" -#include "cds/aotMetaspace.hpp" -#include "memory/allocation.inline.hpp" -#include "utilities/growableArray.hpp" +#include "runtime/vm_version.hpp" -void AOTGrowableArrayHelper::deallocate(void* mem) { - if (!AOTMetaspace::in_aot_cache(mem)) { - GrowableArrayCHeapAllocator::deallocate(mem); - } +#include + +int VM_Version::get_dcache_line_size() { + return _system_configuration.dcache_line; +} + +int VM_Version::get_icache_line_size() { + return _system_configuration.icache_line; } diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp index 36599594842..49d879731ff 100644 --- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp @@ -620,6 +620,8 @@ extern "C" { assert(VM_Version::supports_sb(), "current CPU does not support SB instruction"); asm volatile(".inst 0xd50330ff" : : : "memory"); break; + case SpinWait::WFET: + ShouldNotReachHere(); #ifdef ASSERT default: ShouldNotReachHere(); diff --git a/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp index 6c8684718fc..8e45490e5b6 100644 --- a/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp +++ b/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -27,7 +27,6 @@ #define OS_CPU_BSD_ZERO_ATOMICACCESS_BSD_ZERO_HPP #include "orderAccess_bsd_zero.hpp" -#include "runtime/os.hpp" // Implementation of class AtomicAccess diff --git a/src/hotspot/os_cpu/linux_aarch64/ic_ivau_probe_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/ic_ivau_probe_linux_aarch64.S new file mode 100644 index 00000000000..b82053d37b9 --- /dev/null +++ b/src/hotspot/os_cpu/linux_aarch64/ic_ivau_probe_linux_aarch64.S @@ -0,0 +1,69 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "defs.S.inc" + + # Probe whether IC IVAU is trapped. + # + # Returns 1 if IC IVAU is trapped (did not fault), 0 if not trapped + # (faulted on VA 0x0, signal handler redirected to continuation). 
+ # + # int ic_ivau_probe(void); +DECLARE_FUNC(ic_ivau_probe): +DECLARE_FUNC(_ic_ivau_probe_fault): + ic ivau, xzr + mov x0, #1 + ret +DECLARE_FUNC(_ic_ivau_probe_continuation): + mov x0, #0 + ret + +/* Emit .note.gnu.property section in case of PAC or BTI being enabled. */ +#ifdef __ARM_FEATURE_BTI_DEFAULT + #ifdef __ARM_FEATURE_PAC_DEFAULT + #define GNU_PROPERTY_AARCH64_FEATURE 3 + #else + #define GNU_PROPERTY_AARCH64_FEATURE 1 + #endif +#else + #ifdef __ARM_FEATURE_PAC_DEFAULT + #define GNU_PROPERTY_AARCH64_FEATURE 2 + #else + #define GNU_PROPERTY_AARCH64_FEATURE 0 + #endif +#endif + +#if (GNU_PROPERTY_AARCH64_FEATURE != 0) + .pushsection .note.gnu.property, "a" + .align 3 + .long 4 /* name length */ + .long 0x10 /* data length */ + .long 5 /* note type: NT_GNU_PROPERTY_TYPE_0 */ + .string "GNU" /* vendor name */ + .long 0xc0000000 /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ + .long 4 /* pr_datasze */ + .long GNU_PROPERTY_AARCH64_FEATURE + .long 0 + .popsection +#endif diff --git a/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.cpp new file mode 100644 index 00000000000..11911a48e06 --- /dev/null +++ b/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.cpp @@ -0,0 +1,28 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "runtime/icache.hpp" +#include "utilities/globalDefinitions.hpp" + +DEBUG_ONLY(THREAD_LOCAL AArch64ICacheInvalidationContext* AArch64ICacheInvalidationContext::_current_context = nullptr;) diff --git a/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.hpp index 8fbaa7a6b6e..5121a875701 100644 --- a/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/icache_linux_aarch64.hpp @@ -26,6 +26,11 @@ #ifndef OS_CPU_LINUX_AARCH64_ICACHE_AARCH64_HPP #define OS_CPU_LINUX_AARCH64_ICACHE_AARCH64_HPP +#include "memory/allocation.hpp" +#include "runtime/vm_version.hpp" +#include "utilities/globalDefinitions.hpp" +#include "vm_version_aarch64.hpp" + // Interface for updating the instruction cache. Whenever the VM // modifies code, part of the processor instruction cache potentially // has to be flushed. 
@@ -37,8 +42,105 @@ class ICache : public AbstractICache { __builtin___clear_cache((char *)addr, (char *)(addr + 4)); } static void invalidate_range(address start, int nbytes) { - __builtin___clear_cache((char *)start, (char *)(start + nbytes)); + if (NeoverseN1ICacheErratumMitigation) { + assert(VM_Version::is_cache_idc_enabled(), + "Expect CTR_EL0.IDC to be enabled for Neoverse N1 with erratum " + "1542419"); + assert(!VM_Version::is_cache_dic_enabled(), + "Expect CTR_EL0.DIC to be disabled for Neoverse N1 with erratum " + "1542419"); + assert(VM_Version::is_ic_ivau_trapped(), "Expect 'ic ivau, xzr' to be trapped"); + asm volatile("dsb ish \n" + "ic ivau, xzr \n" + "dsb ish \n" + "isb \n" + : : : "memory"); + } else { + __builtin___clear_cache((char *)start, (char *)(start + nbytes)); + } } }; +class AArch64ICacheInvalidationContext : StackObj { + private: + +#ifdef ASSERT + static THREAD_LOCAL AArch64ICacheInvalidationContext* _current_context; +#endif + + bool _has_modified_code; + + public: + NONCOPYABLE(AArch64ICacheInvalidationContext); + + AArch64ICacheInvalidationContext() + : _has_modified_code(false) { + assert(_current_context == nullptr, "nested ICacheInvalidationContext not supported"); +#ifdef ASSERT + _current_context = this; +#endif + } + + ~AArch64ICacheInvalidationContext() { + DEBUG_ONLY(_current_context = nullptr); + + if (!_has_modified_code || !UseSingleICacheInvalidation) { + return; + } + + assert(VM_Version::is_cache_idc_enabled(), "Expect CTR_EL0.IDC to be enabled"); + + asm volatile("dsb ish" : : : "memory"); + + if (NeoverseN1ICacheErratumMitigation) { + assert(!VM_Version::is_cache_dic_enabled(), + "Expect CTR_EL0.DIC to be disabled for Neoverse N1 with erratum " + "1542419"); + assert(VM_Version::is_ic_ivau_trapped(), "Expect 'ic ivau, xzr' to be trapped"); + + // Errata 1542419: Neoverse N1 cores with the 'COHERENT_ICACHE' feature + // may fetch stale instructions when software depends on + // prefetch-speculation-protection instead 
of explicit synchronization. + // + // Neoverse-N1 implementation mitigates the errata 1542419 with a + // workaround: + // - Disable coherent icache. + // - Trap IC IVAU instructions. + // - Execute: + // - tlbi vae3is, xzr + // - dsb sy + // - Ignore trapped IC IVAU instructions. + // + // `tlbi vae3is, xzr` invalidates all translation entries (all VAs, all + // possible levels). It waits for all memory accesses using in-scope old + // translation information to complete before it is considered complete. + // + // As this workaround has significant overhead, Arm Neoverse N1 (MP050) + // Software Developer Errata Notice version 29.0 suggests: + // + // "Since one TLB inner-shareable invalidation is enough to avoid this + // erratum, the number of injected TLB invalidations should be minimized + // in the trap handler to mitigate the performance impact due to this + // workaround." + // As the address for icache invalidation is not relevant and + // IC IVAU instruction is ignored, we use XZR in it. + asm volatile( + "ic ivau, xzr \n" + "dsb ish \n" + : + : + : "memory"); + } else { + assert(VM_Version::is_cache_dic_enabled(), "Expect CTR_EL0.DIC to be enabled"); + } + asm volatile("isb" : : : "memory"); + } + + void set_has_modified_code() { + _has_modified_code = true; + } +}; + +#define PD_ICACHE_INVALIDATION_CONTEXT AArch64ICacheInvalidationContext + #endif // OS_CPU_LINUX_AARCH64_ICACHE_AARCH64_HPP diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp index da9e7e159f1..67e0569bf31 100644 --- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp @@ -77,6 +77,11 @@ #define REG_LR 30 #define REG_BCP 22 +// IC IVAU trap probe. +// Defined in ic_ivau_probe_linux_aarch64.S. 
+extern "C" char _ic_ivau_probe_fault[] __attribute__ ((visibility ("hidden"))); +extern "C" char _ic_ivau_probe_continuation[] __attribute__ ((visibility ("hidden"))); + NOINLINE address os::current_stack_pointer() { return (address)__builtin_frame_address(0); } @@ -228,6 +233,12 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, } } + // IC IVAU trap probe during VM_Version initialization. + // If IC IVAU is not trapped, it faults on unmapped VA 0x0. + if (sig == SIGSEGV && pc == (address)_ic_ivau_probe_fault) { + stub = (address)_ic_ivau_probe_continuation; + } + if (thread->thread_state() == _thread_in_Java) { // Java thread running in Java code => find exception handler if any // a fault inside compiled code, the interpreter, or a stub diff --git a/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp index 1fe06dc640d..ee2d3013c4c 100644 --- a/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp +++ b/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp @@ -31,6 +31,10 @@ #include #include +// IC IVAU trap probe. +// Defined in ic_ivau_probe_linux_aarch64.S. +extern "C" int ic_ivau_probe(void); + #ifndef HWCAP_AES #define HWCAP_AES (1<<3) #endif @@ -95,6 +99,13 @@ #define HWCAP2_SVEBITPERM (1 << 4) #endif +#ifndef HWCAP2_ECV +#define HWCAP2_ECV (1 << 19) +#endif + +#ifndef HWCAP2_WFXT +#define HWCAP2_WFXT (1u << 31) +#endif #ifndef PR_SVE_GET_VL // For old toolchains which do not have SVE related macros defined. 
#define PR_SVE_SET_VL 50 @@ -158,6 +169,12 @@ void VM_Version::get_os_cpu_info() { if (auxv2 & HWCAP2_SVEBITPERM) { set_feature(CPU_SVEBITPERM); } + if (auxv2 & HWCAP2_ECV) { + set_feature(CPU_ECV); + } + if (auxv2 & HWCAP2_WFXT) { + set_feature(CPU_WFXT); + } uint64_t ctr_el0; uint64_t dczid_el0; @@ -169,6 +186,12 @@ void VM_Version::get_os_cpu_info() { _icache_line_size = (1 << (ctr_el0 & 0x0f)) * 4; _dcache_line_size = (1 << ((ctr_el0 >> 16) & 0x0f)) * 4; + _cache_idc_enabled = ((ctr_el0 >> 28) & 0x1) != 0; + _cache_dic_enabled = ((ctr_el0 >> 29) & 0x1) != 0; + + // Probe whether IC IVAU is trapped. + // Must run before VM_Version::initialize() sets NeoverseN1ICacheErratumMitigation. + _ic_ivau_trapped = (ic_ivau_probe() == 1); if (!(dczid_el0 & 0x10)) { _zva_length = 4 << (dczid_el0 & 0xf); diff --git a/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp index 390207f9e5e..c03f5ed1c8b 100644 --- a/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp +++ b/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,6 @@ #define OS_CPU_LINUX_ARM_ATOMICACCESS_LINUX_ARM_HPP #include "memory/allStatic.hpp" -#include "runtime/os.hpp" -#include "runtime/vm_version.hpp" // Implementation of class AtomicAccess diff --git a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp index 3bb357704fb..49c6942b8e0 100644 --- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp +++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2020, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,6 @@ // Included in orderAccess.hpp header file. -#include "runtime/os.hpp" #include "runtime/vm_version.hpp" // Implementation of class OrderAccess. diff --git a/src/hotspot/os_cpu/linux_ppc/vm_version_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/vm_version_linux_ppc.cpp new file mode 100644 index 00000000000..d64340edf5c --- /dev/null +++ b/src/hotspot/os_cpu/linux_ppc/vm_version_linux_ppc.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "runtime/vm_version.hpp" + +#include + +int VM_Version::get_dcache_line_size() { + // This should work on all modern linux versions: + int size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE); + // It may fail with very old linux / glibc versions. We use DEFAULT_CACHE_LINE_SIZE in this case. + // That is the correct value for all currently supported processors. + return (size <= 0) ? DEFAULT_CACHE_LINE_SIZE : size; +} + +int VM_Version::get_icache_line_size() { + // This should work on all modern linux versions: + int size = sysconf(_SC_LEVEL1_ICACHE_LINESIZE); + // It may fail with very old linux / glibc versions. We use DEFAULT_CACHE_LINE_SIZE in this case. + // That is the correct value for all currently supported processors. + return (size <= 0) ? DEFAULT_CACHE_LINE_SIZE : size; +} diff --git a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp index 35cbb75e8ff..648131b94a3 100644 --- a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp +++ b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp @@ -36,40 +36,42 @@ #include #include +static constexpr uint64_t feature_bit(int n) { return nth_bit(n); } + #ifndef HWCAP_ISA_I -#define HWCAP_ISA_I nth_bit('I' - 'A') +#define HWCAP_ISA_I feature_bit('I' - 'A') #endif #ifndef HWCAP_ISA_M -#define HWCAP_ISA_M nth_bit('M' - 'A') +#define HWCAP_ISA_M feature_bit('M' - 'A') #endif #ifndef HWCAP_ISA_A -#define HWCAP_ISA_A nth_bit('A' - 'A') +#define HWCAP_ISA_A feature_bit('A' - 'A') #endif #ifndef HWCAP_ISA_F -#define HWCAP_ISA_F nth_bit('F' - 'A') +#define HWCAP_ISA_F feature_bit('F' - 'A') #endif #ifndef HWCAP_ISA_D -#define HWCAP_ISA_D nth_bit('D' - 'A') +#define HWCAP_ISA_D feature_bit('D' - 'A') #endif #ifndef HWCAP_ISA_C -#define HWCAP_ISA_C nth_bit('C' - 'A') +#define HWCAP_ISA_C feature_bit('C' - 'A') #endif #ifndef HWCAP_ISA_Q -#define HWCAP_ISA_Q nth_bit('Q' - 'A') +#define HWCAP_ISA_Q feature_bit('Q' - 'A') #endif #ifndef 
HWCAP_ISA_H -#define HWCAP_ISA_H nth_bit('H' - 'A') +#define HWCAP_ISA_H feature_bit('H' - 'A') #endif #ifndef HWCAP_ISA_V -#define HWCAP_ISA_V nth_bit('V' - 'A') +#define HWCAP_ISA_V feature_bit('V' - 'A') #endif #define read_csr(csr) \ diff --git a/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp b/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp index f3c1e8f1a2c..492ccf73bdf 100644 --- a/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp +++ b/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,10 +26,6 @@ #ifndef OS_CPU_LINUX_S390_ATOMICACCESS_LINUX_S390_HPP #define OS_CPU_LINUX_S390_ATOMICACCESS_LINUX_S390_HPP -#include "runtime/atomicAccess.hpp" -#include "runtime/os.hpp" -#include "runtime/vm_version.hpp" - // Note that the compare-and-swap instructions on System z perform // a serialization function before the storage operand is fetched // and again after the operation is completed. diff --git a/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp index f8119654c50..9238043f7a4 100644 --- a/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp +++ b/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Microsoft Corporation. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -27,9 +27,7 @@ #define OS_CPU_WINDOWS_AARCH64_ATOMICACCESS_WINDOWS_AARCH64_HPP #include -#include "runtime/os.hpp" -#include "runtime/vm_version.hpp" - +#include // As per atomicAccess.hpp all read-modify-write operations have to provide two-way // barriers semantics. The memory_order parameter is ignored - we always provide diff --git a/src/hotspot/os_cpu/windows_aarch64/prefetch_windows_aarch64.inline.hpp b/src/hotspot/os_cpu/windows_aarch64/prefetch_windows_aarch64.inline.hpp index a360ee342be..a2c8f0c685c 100644 --- a/src/hotspot/os_cpu/windows_aarch64/prefetch_windows_aarch64.inline.hpp +++ b/src/hotspot/os_cpu/windows_aarch64/prefetch_windows_aarch64.inline.hpp @@ -27,10 +27,24 @@ // Included in runtime/prefetch.inline.hpp +#include + +// __prefetch2(addr, prfop) emits a PRFM instruction. +// The prfop encoding is: +// type: PLD = 00, PLI = 01, PST = 10 +// target: L1 = 00, L2 = 01, L3 = 10 +// policy: KEEP = 0, STRM = 1 + inline void Prefetch::read (const void *loc, intx interval) { + if (interval >= 0) { + __prefetch2((const char*) loc + interval, /* PLD + L1 + KEEP */ 0); + } } inline void Prefetch::write(void *loc, intx interval) { + if (interval >= 0) { + __prefetch2((char*) loc + interval, /* PST + L1 + KEEP */ 16); + } } #endif // OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP diff --git a/src/hotspot/os_cpu/windows_aarch64/sve_windows_aarch64.S b/src/hotspot/os_cpu/windows_aarch64/sve_windows_aarch64.S new file mode 100644 index 00000000000..e0c85830bd4 --- /dev/null +++ b/src/hotspot/os_cpu/windows_aarch64/sve_windows_aarch64.S @@ -0,0 +1,42 @@ +; +; Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. +; DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +; +; This code is free software; you can redistribute it and/or modify it +; under the terms of the GNU General Public License version 2 only, as +; published by the Free Software Foundation. 
+; +; This code is distributed in the hope that it will be useful, but WITHOUT +; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +; version 2 for more details (a copy is included in the LICENSE file that +; accompanied this code). +; +; You should have received a copy of the GNU General Public License version +; 2 along with this work; if not, write to the Free Software Foundation, +; Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +; +; Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +; or visit www.oracle.com if you need additional information or have any +; questions. +; + + ; Support for int get_sve_vector_length(); + ; + ; Returns the current SVE vector length in bytes. + ; This function uses the INCB instruction which increments a register + ; by the number of bytes in an SVE vector register. + ; + ; Note: This function will fault if SVE is not available or enabled. + ; The caller must ensure SVE support is detected before calling. + + ALIGN 4 + EXPORT get_sve_vector_length + AREA sve_text, CODE + +get_sve_vector_length + mov x0, #0 + incb x0 + ret + + END diff --git a/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp index 93beb549366..e78a37b4178 100644 --- a/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp +++ b/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp @@ -26,16 +26,19 @@ #include "runtime/os.hpp" #include "runtime/vm_version.hpp" +// Assembly function to get SVE vector length using INCB instruction +extern "C" int get_sve_vector_length(); + int VM_Version::get_current_sve_vector_length() { assert(VM_Version::supports_sve(), "should not call this"); - ShouldNotReachHere(); - return 0; + // Use assembly instruction to get the actual SVE vector length + return VM_Version::supports_sve() ? 
get_sve_vector_length() : 0; // This value is in bytes } int VM_Version::set_and_get_current_sve_vector_length(int length) { assert(VM_Version::supports_sve(), "should not call this"); - ShouldNotReachHere(); - return 0; + // Use assembly instruction to get the SVE vector length + return VM_Version::supports_sve() ? get_sve_vector_length() : 0; // This value is in bytes } void VM_Version::get_os_cpu_info() { @@ -47,11 +50,29 @@ void VM_Version::get_os_cpu_info() { set_feature(CPU_AES); set_feature(CPU_SHA1); set_feature(CPU_SHA2); + set_feature(CPU_PMULL); } if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) { set_feature(CPU_ASIMD); } - // No check for CPU_PMULL, CPU_SVE, CPU_SVE2 + if (IsProcessorFeaturePresent(PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE)) { + set_feature(CPU_LSE); + } + if (IsProcessorFeaturePresent(PF_ARM_SVE_INSTRUCTIONS_AVAILABLE)) { + set_feature(CPU_SVE); + } + if (IsProcessorFeaturePresent(PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE)) { + set_feature(CPU_SVE2); + } + if (IsProcessorFeaturePresent(PF_ARM_SVE_BITPERM_INSTRUCTIONS_AVAILABLE)) { + set_feature(CPU_SVEBITPERM); + } + if (IsProcessorFeaturePresent(PF_ARM_SHA3_INSTRUCTIONS_AVAILABLE)) { + set_feature(CPU_SHA3); + } + if (IsProcessorFeaturePresent(PF_ARM_SHA512_INSTRUCTIONS_AVAILABLE)) { + set_feature(CPU_SHA512); + } __int64 dczid_el0 = _ReadStatusReg(0x5807 /* ARM64_DCZID_EL0 */); @@ -102,8 +123,8 @@ void VM_Version::get_os_cpu_info() { SYSTEM_INFO si; GetSystemInfo(&si); _model = si.wProcessorLevel; - _variant = si.wProcessorRevision / 0xFF; - _revision = si.wProcessorRevision & 0xFF; + _variant = (si.wProcessorRevision >> 8) & 0xFF; // Variant is the upper byte of wProcessorRevision + _revision = si.wProcessorRevision & 0xFF; // Revision is the lower byte of wProcessorRevision } } } diff --git a/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp index aa78a401235..252411f62bc 100644 --- 
a/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define OS_CPU_WINDOWS_X86_ATOMICACCESS_WINDOWS_X86_HPP #include -#include "runtime/os.hpp" +#include // Note that in MSVC, volatile memory accesses are explicitly // guaranteed to have acquire release semantics (w.r.t. compiler diff --git a/src/hotspot/os_cpu/windows_x86/prefetch_windows_x86.inline.hpp b/src/hotspot/os_cpu/windows_x86/prefetch_windows_x86.inline.hpp index 645fbe99a22..575eabc97dd 100644 --- a/src/hotspot/os_cpu/windows_x86/prefetch_windows_x86.inline.hpp +++ b/src/hotspot/os_cpu/windows_x86/prefetch_windows_x86.inline.hpp @@ -27,7 +27,18 @@ // Included in runtime/prefetch.inline.hpp -inline void Prefetch::read (const void *loc, intx interval) {} -inline void Prefetch::write(void *loc, intx interval) {} +#include + +inline void Prefetch::read (const void *loc, intx interval) { + if (interval >= 0) { + _mm_prefetch((const char*) loc + interval, _MM_HINT_T0); + } +} + +inline void Prefetch::write(void *loc, intx interval) { + if (interval >= 0) { + _mm_prefetch((const char*) loc + interval, _MM_HINT_T0); + } +} #endif // OS_CPU_WINDOWS_X86_PREFETCH_WINDOWS_X86_INLINE_HPP diff --git a/src/hotspot/share/adlc/adlparse.cpp b/src/hotspot/share/adlc/adlparse.cpp index 356c24760e8..b49efa34be8 100644 --- a/src/hotspot/share/adlc/adlparse.cpp +++ b/src/hotspot/share/adlc/adlparse.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -993,9 +993,6 @@ void ADLParser::frame_parse(void) { if (strcmp(token,"frame_pointer")==0) { frame_pointer_parse(frame, false); } - if (strcmp(token,"interpreter_frame_pointer")==0) { - interpreter_frame_pointer_parse(frame, false); - } if (strcmp(token,"inline_cache_reg")==0) { inline_cache_parse(frame, false); } @@ -1119,11 +1116,6 @@ void ADLParser::frame_pointer_parse(FrameForm *frame, bool native) { else { frame->_frame_pointer = frame_pointer; } } -//------------------------------interpreter_frame_pointer_parse---------------------------- -void ADLParser::interpreter_frame_pointer_parse(FrameForm *frame, bool native) { - frame->_interpreter_frame_pointer_reg = parse_one_arg("interpreter frame pointer entry"); -} - //------------------------------inline_cache_parse----------------------------- void ADLParser::inline_cache_parse(FrameForm *frame, bool native) { frame->_inline_cache_reg = parse_one_arg("inline cache reg entry"); diff --git a/src/hotspot/share/adlc/adlparse.hpp b/src/hotspot/share/adlc/adlparse.hpp index 02baec53262..89296193612 100644 --- a/src/hotspot/share/adlc/adlparse.hpp +++ b/src/hotspot/share/adlc/adlparse.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -120,7 +120,6 @@ protected: // Parse the components of the frame section void sync_stack_slots_parse(FrameForm *frame); void frame_pointer_parse(FrameForm *frame, bool native); - void interpreter_frame_pointer_parse(FrameForm *frame, bool native); void inline_cache_parse(FrameForm *frame, bool native); void interpreter_arg_ptr_parse(FrameForm *frame, bool native); void interpreter_method_parse(FrameForm *frame, bool native); diff --git a/src/hotspot/share/adlc/formsopt.cpp b/src/hotspot/share/adlc/formsopt.cpp index fbd1043492e..091e34f40f4 100644 --- a/src/hotspot/share/adlc/formsopt.cpp +++ b/src/hotspot/share/adlc/formsopt.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -476,7 +476,6 @@ void AllocClass::forms_do(FormClosure* f) { FrameForm::FrameForm() { _sync_stack_slots = nullptr; _inline_cache_reg = nullptr; - _interpreter_frame_pointer_reg = nullptr; _cisc_spilling_operand_name = nullptr; _frame_pointer = nullptr; _c_frame_pointer = nullptr; diff --git a/src/hotspot/share/adlc/formsopt.hpp b/src/hotspot/share/adlc/formsopt.hpp index 9e0c9db854d..087ab1e2653 100644 --- a/src/hotspot/share/adlc/formsopt.hpp +++ b/src/hotspot/share/adlc/formsopt.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -347,7 +347,6 @@ public: // Public Data char *_sync_stack_slots; char *_inline_cache_reg; - char *_interpreter_frame_pointer_reg; char *_cisc_spilling_operand_name; char *_frame_pointer; char *_c_frame_pointer; diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp index 182587d2f2f..5802217c1c1 100644 --- a/src/hotspot/share/adlc/formssel.cpp +++ b/src/hotspot/share/adlc/formssel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -4233,11 +4233,13 @@ int MatchRule::is_expensive() const { strcmp(opType,"PopulateIndex")==0 || strcmp(opType,"AddReductionVI")==0 || strcmp(opType,"AddReductionVL")==0 || + strcmp(opType,"AddReductionVHF")==0 || strcmp(opType,"AddReductionVF")==0 || strcmp(opType,"AddReductionVD")==0 || strcmp(opType,"MulReductionVI")==0 || strcmp(opType,"MulReductionVL")==0 || strcmp(opType,"MulReductionVF")==0 || + strcmp(opType,"MulReductionVHF")==0 || strcmp(opType,"MulReductionVD")==0 || strcmp(opType,"MinReductionV")==0 || strcmp(opType,"MaxReductionV")==0 || @@ -4276,7 +4278,9 @@ bool MatchRule::is_ideal_membar() const { !strcmp(_opType,"LoadFence" ) || !strcmp(_opType,"StoreFence") || !strcmp(_opType,"StoreStoreFence") || + !strcmp(_opType,"MemBarStoreLoad") || !strcmp(_opType,"MemBarVolatile") || + !strcmp(_opType,"MemBarFull") || !strcmp(_opType,"MemBarCPUOrder") || !strcmp(_opType,"MemBarStoreStore") || !strcmp(_opType,"OnSpinWait"); @@ -4346,9 +4350,9 @@ bool MatchRule::is_vector() const { "MaxV", "MinV", "MinVHF", "MaxVHF", "UMinV", "UMaxV", "CompressV", "ExpandV", "CompressM", "CompressBitsV", "ExpandBitsV", "AddReductionVI", "AddReductionVL", - "AddReductionVF", 
"AddReductionVD", + "AddReductionVHF", "AddReductionVF", "AddReductionVD", "MulReductionVI", "MulReductionVL", - "MulReductionVF", "MulReductionVD", + "MulReductionVHF", "MulReductionVF", "MulReductionVD", "MaxReductionV", "MinReductionV", "AndReductionV", "OrReductionV", "XorReductionV", "MulAddVS2VI", "MacroLogicV", diff --git a/src/hotspot/share/adlc/output_c.cpp b/src/hotspot/share/adlc/output_c.cpp index 9cbd6aaf66f..45b3d6bda63 100644 --- a/src/hotspot/share/adlc/output_c.cpp +++ b/src/hotspot/share/adlc/output_c.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -4212,14 +4212,6 @@ void ArchDesc::buildFrameMethods(FILE *fp_cpp) { fprintf(fp_cpp,"int Matcher::inline_cache_reg_encode() {"); fprintf(fp_cpp," return _regEncode[inline_cache_reg()]; }\n\n"); - // Interpreter's Frame Pointer Register - fprintf(fp_cpp,"OptoReg::Name Matcher::interpreter_frame_pointer_reg() {"); - if (_frame->_interpreter_frame_pointer_reg == nullptr) - fprintf(fp_cpp," return OptoReg::Bad; }\n\n"); - else - fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n", - _frame->_interpreter_frame_pointer_reg); - // Frame Pointer definition /* CNC - I can not contemplate having a different frame pointer between Java and native code; makes my head hurt to think about it. 
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp index ba525588f32..854cf73049b 100644 --- a/src/hotspot/share/asm/codeBuffer.cpp +++ b/src/hotspot/share/asm/codeBuffer.cpp @@ -32,7 +32,6 @@ #include "oops/methodCounters.hpp" #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" -#include "runtime/icache.hpp" #include "runtime/safepointVerifiers.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" @@ -745,9 +744,6 @@ void CodeBuffer::copy_code_to(CodeBlob* dest_blob) { // Done moving code bytes; were they the right size? assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity"); - - // Flush generated code - ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size()); } // Move all my code into another code buffer. Consult applicable @@ -862,6 +858,13 @@ csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs, } void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) { +#ifdef ASSERT + // The code below copies contents across temp buffers. The following + // sizes relate to buffer contents, and should not be changed by buffer + // expansion. 
+ int old_total_skipped = total_skipped_instructions_size(); +#endif + #ifndef PRODUCT if (PrintNMethods && (WizardMode || Verbose)) { tty->print("expanding CodeBuffer:"); @@ -920,6 +923,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) { assert(cb_sect->capacity() >= new_capacity[n], "big enough"); address cb_start = cb_sect->start(); cb_sect->set_end(cb_start + this_sect->size()); + cb_sect->register_skipped(this_sect->_skipped_instructions_size); if (this_sect->mark() == nullptr) { cb_sect->clear_mark(); } else { @@ -956,6 +960,9 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) { this->print_on(tty); } #endif //PRODUCT + + assert(old_total_skipped == total_skipped_instructions_size(), + "Should match: %d == %d", old_total_skipped, total_skipped_instructions_size()); } void CodeBuffer::adjust_internal_address(address from, address to) { @@ -1140,7 +1147,7 @@ void AsmRemarks::clear() { uint AsmRemarks::print(uint offset, outputStream* strm) const { uint count = 0; const char* prefix = " ;; "; - const char* remstr = _remarks->lookup(offset); + const char* remstr = (_remarks ? 
_remarks->lookup(offset) : nullptr); while (remstr != nullptr) { strm->bol(); strm->print("%s", prefix); diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index 63764dd113a..38f563935e0 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -278,11 +278,9 @@ bool Runtime1::initialize(BufferBlob* blob) { if (!generate_blob_for(blob, id)) { return false; } - if (id == StubId::c1_forward_exception_id) { - // publish early c1 stubs at this point so later stubs can refer to them - AOTCodeCache::init_early_c1_table(); - } } + // disallow any further c1 stub generation + AOTCodeCache::set_c1_stubs_complete(); // printing #ifndef PRODUCT if (PrintSimpleStubs) { diff --git a/src/hotspot/share/cds/aotArtifactFinder.cpp b/src/hotspot/share/cds/aotArtifactFinder.cpp index f85f1e46520..bd69b18a1aa 100644 --- a/src/hotspot/share/cds/aotArtifactFinder.cpp +++ b/src/hotspot/share/cds/aotArtifactFinder.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "cds/lambdaProxyClassDictionary.hpp" #include "cds/regeneratedClasses.hpp" #include "classfile/systemDictionaryShared.hpp" +#include "classfile/vmClasses.hpp" #include "logging/log.hpp" #include "memory/metaspaceClosure.hpp" #include "oops/instanceKlass.hpp" @@ -169,6 +170,7 @@ void AOTArtifactFinder::find_artifacts() { end_scanning_for_oops(); TrainingData::cleanup_training_data(); + check_critical_classes(); } void AOTArtifactFinder::start_scanning_for_oops() { @@ -233,6 +235,7 @@ void AOTArtifactFinder::add_cached_instance_class(InstanceKlass* ik) { bool created; _seen_classes->put_if_absent(ik, &created); if (created) { + check_critical_class(ik); append_to_all_cached_classes(ik); // All super types must be added. @@ -310,3 +313,25 @@ void AOTArtifactFinder::all_cached_classes_do(MetaspaceClosure* it) { it->push(_all_cached_classes->adr_at(i)); } } + +void AOTArtifactFinder::check_critical_classes() { + if (CDSConfig::is_dumping_static_archive()) { + // vmClasses are store in the AOT cache (or AOT config file, or static archive). + // If any of the vmClasses is excluded, (usually due to incompatible JVMTI agent), + // the resulting cache/config/archive is unusable. + for (auto id : EnumRange{}) { + check_critical_class(vmClasses::klass_at(id)); + } + } +} + +void AOTArtifactFinder::check_critical_class(InstanceKlass* ik) { + if (SystemDictionaryShared::is_excluded_class(ik)) { + ResourceMark rm; + const char* msg = err_msg("Critical class %s has been excluded. 
%s cannot be written.", + ik->external_name(), + CDSConfig::type_of_archive_being_written()); + AOTMetaspace::unrecoverable_writing_error(msg); + } +} + diff --git a/src/hotspot/share/cds/aotArtifactFinder.hpp b/src/hotspot/share/cds/aotArtifactFinder.hpp index 05bcde6b0ac..50057b6caee 100644 --- a/src/hotspot/share/cds/aotArtifactFinder.hpp +++ b/src/hotspot/share/cds/aotArtifactFinder.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,12 +81,14 @@ class AOTArtifactFinder : AllStatic { static void add_cached_type_array_class(TypeArrayKlass* tak); static void add_cached_instance_class(InstanceKlass* ik); static void append_to_all_cached_classes(Klass* k); + static void check_critical_class(InstanceKlass* ik); public: static void initialize(); static void find_artifacts(); static void add_cached_class(Klass* k); static void add_aot_inited_class(InstanceKlass* ik); static void all_cached_classes_do(MetaspaceClosure* it); + static void check_critical_classes(); static void dispose(); }; diff --git a/src/hotspot/share/cds/aotClassInitializer.cpp b/src/hotspot/share/cds/aotClassInitializer.cpp index 06fc3af6f30..9ef96282aeb 100644 --- a/src/hotspot/share/cds/aotClassInitializer.cpp +++ b/src/hotspot/share/cds/aotClassInitializer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -59,6 +59,39 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) { return false; } +#ifdef ASSERT + // If code in ik is executed, then ik must be in the state of being_initialized or + // fully_initialized. + // + // Check that no user code is executed during the assembly phase. Otherwise the user + // code may introduce undesirable environment dependencies into the heap image. + // If any of these two flags are set, we allow user code to be executed + // in the assembly phase. Note that these flags are strictly for the purpose + // of testing HotSpot and are not available in product builds. + if (AOTInitTestClass == nullptr && ArchiveHeapTestClass == nullptr) { + if (ik->defined_by_boot_loader()) { + // We allow boot classes to be AOT-initialized, except for classes from + // -Xbootclasspath (cp index >= 1) be AOT-initialized, as such classes may be + // provided by the user application. + assert(ik->shared_classpath_index() <= 0, + "only boot classed loaded from the modules image can be AOT-initialized"); + } else { + assert(ik->defined_by_platform_loader() || ik->defined_by_app_loader(), + "cannot AOT-initialized classed loaded by other loaders"); + + // Hidden classes from platform/app loaders need to be AOT-initialized to + // support AOT-linking of lambdas. These hidden classes are generated by the + // VM and do not contain user code. + if (!ik->is_hidden()) { + // OK: ik is an interface used by a lambda. When AOT-linking lambdas, we only + // support interfaces that are not interface_needs_clinit_execution_as_super(). + // See AOTConstantPoolResolver::check_lambda_metafactory_signature(). 
+ assert(ik->is_interface() && !ik->interface_needs_clinit_execution_as_super(), "cannot execute Java code in assembly phase"); + } + } + } +#endif // ASSERT + // About "static field that may hold a different value" errors: // // Automatic selection for aot-inited classes @@ -234,7 +267,8 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) { } void AOTClassInitializer::call_runtime_setup(JavaThread* current, InstanceKlass* ik) { - assert(ik->has_aot_initialized_mirror(), "sanity"); + precond(ik->has_aot_initialized_mirror()); + precond(!AOTLinkedClassBulkLoader::is_initializing_classes_early()); if (ik->is_runtime_setup_required()) { if (log_is_enabled(Info, aot, init)) { ResourceMark rm; diff --git a/src/hotspot/share/cds/aotConstantPoolResolver.cpp b/src/hotspot/share/cds/aotConstantPoolResolver.cpp index 93145940955..f1a704d4bee 100644 --- a/src/hotspot/share/cds/aotConstantPoolResolver.cpp +++ b/src/hotspot/share/cds/aotConstantPoolResolver.cpp @@ -81,6 +81,7 @@ bool AOTConstantPoolResolver::is_resolution_deterministic(ConstantPool* cp, int bool AOTConstantPoolResolver::is_class_resolution_deterministic(InstanceKlass* cp_holder, Klass* resolved_class) { assert(!is_in_archivebuilder_buffer(cp_holder), "sanity"); assert(!is_in_archivebuilder_buffer(resolved_class), "sanity"); + assert_at_safepoint(); // try_add_candidate() is called below and requires to be at safepoint. 
if (resolved_class->is_instance_klass()) { InstanceKlass* ik = InstanceKlass::cast(resolved_class); @@ -346,7 +347,15 @@ void AOTConstantPoolResolver::maybe_resolve_fmi_ref(InstanceKlass* ik, Method* m break; case Bytecodes::_invokehandle: - InterpreterRuntime::cds_resolve_invokehandle(raw_index, cp, CHECK); + if (CDSConfig::is_dumping_method_handles()) { + ResolvedMethodEntry* method_entry = cp->resolved_method_entry_at(raw_index); + int cp_index = method_entry->constant_pool_index(); + Symbol* sig = cp->uncached_signature_ref_at(cp_index); + Klass* k; + if (check_methodtype_signature(cp(), sig, &k, true)) { + InterpreterRuntime::cds_resolve_invokehandle(raw_index, cp, CHECK); + } + } break; default: @@ -400,7 +409,7 @@ void AOTConstantPoolResolver::preresolve_indy_cp_entries(JavaThread* current, In // Check the MethodType signatures used by parameters to the indy BSMs. Make sure we don't // use types that have been excluded, or else we might end up creating MethodTypes that cannot be stored // in the AOT cache. -bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret) { +bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret, bool is_invokehandle) { ResourceMark rm; for (SignatureStream ss(sig); !ss.is_done(); ss.next()) { if (ss.is_reference()) { @@ -413,11 +422,18 @@ bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbo if (SystemDictionaryShared::should_be_excluded(k)) { if (log_is_enabled(Warning, aot, resolve)) { ResourceMark rm; - log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is excluded", k->external_name()); + log_warning(aot, resolve)("Cannot aot-resolve %s because %s is excluded", + is_invokehandle ? 
"invokehandle" : "Lambda proxy", + k->external_name()); } return false; } + // cp->pool_holder() must be able to resolve k in production run + precond(CDSConfig::is_dumping_aot_linked_classes()); + precond(SystemDictionaryShared::is_builtin_loader(cp->pool_holder()->class_loader_data())); + precond(SystemDictionaryShared::is_builtin_loader(k->class_loader_data())); + if (ss.at_return_type() && return_type_ret != nullptr) { *return_type_ret = k; } @@ -475,11 +491,44 @@ bool AOTConstantPoolResolver::check_lambda_metafactory_methodhandle_arg(Constant return false; } + // klass and sigature of the method (no need to check the method name) Symbol* sig = cp->method_handle_signature_ref_at(mh_index); + Symbol* klass_name = cp->klass_name_at(cp->method_handle_klass_index_at(mh_index)); + if (log_is_enabled(Debug, aot, resolve)) { ResourceMark rm; log_debug(aot, resolve)("Checking MethodType of MethodHandle for LambdaMetafactory BSM arg %d: %s", arg_i, sig->as_C_string()); } + + { + Klass* k = find_loaded_class(Thread::current(), cp->pool_holder()->class_loader(), klass_name); + if (k == nullptr) { + // Dumping AOT cache: all classes should have been loaded by FinalImageRecipes::load_all_classes(). k must have + // been a class that was excluded when FinalImageRecipes recorded all classes at the end of the training run. + // + // Dumping static CDS archive: all classes in the classlist have already been loaded, before we resolve + // constants. k must have been a class that was excluded when the classlist was written + // at the end of the training run. 
+ if (log_is_enabled(Warning, aot, resolve)) { + ResourceMark rm; + log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is not loaded", klass_name->as_C_string()); + } + return false; + } + if (SystemDictionaryShared::should_be_excluded(k)) { + if (log_is_enabled(Warning, aot, resolve)) { + ResourceMark rm; + log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is excluded", k->external_name()); + } + return false; + } + + // cp->pool_holder() must be able to resolve k in production run + precond(CDSConfig::is_dumping_aot_linked_classes()); + precond(SystemDictionaryShared::is_builtin_loader(cp->pool_holder()->class_loader_data())); + precond(SystemDictionaryShared::is_builtin_loader(k->class_loader_data())); + } + return check_methodtype_signature(cp, sig); } diff --git a/src/hotspot/share/cds/aotConstantPoolResolver.hpp b/src/hotspot/share/cds/aotConstantPoolResolver.hpp index e49d9d1ad0b..ecf2ac27061 100644 --- a/src/hotspot/share/cds/aotConstantPoolResolver.hpp +++ b/src/hotspot/share/cds/aotConstantPoolResolver.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,10 @@ class AOTConstantPoolResolver : AllStatic { static void maybe_resolve_fmi_ref(InstanceKlass* ik, Method* m, Bytecodes::Code bc, int raw_index, GrowableArray* resolve_fmi_list, TRAPS); - static bool check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret = nullptr); +public: + static bool check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret = nullptr, bool is_invokehandle = false); + +private: static bool check_lambda_metafactory_signature(ConstantPool* cp, Symbol* sig); static bool check_lambda_metafactory_methodtype_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i); static bool check_lambda_metafactory_methodhandle_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i); diff --git a/src/hotspot/share/cds/aotGrowableArray.hpp b/src/hotspot/share/cds/aotGrowableArray.hpp deleted file mode 100644 index 0a0c137ed07..00000000000 --- a/src/hotspot/share/cds/aotGrowableArray.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP -#define SHARE_AOT_AOTGROWABLEARRAY_HPP - -#include -#include - -class AOTGrowableArrayHelper { -public: - static void deallocate(void* mem); -}; - -// An AOTGrowableArray provides the same functionality as a GrowableArray that -// uses the C heap allocator. In addition, AOTGrowableArray can be iterated with -// MetaspaceClosure. This type should be used for growable arrays that need to be -// stored in the AOT cache. See ModuleEntry::_reads for an example. -template -class AOTGrowableArray : public GrowableArrayWithAllocator> { - friend class VMStructs; - friend class GrowableArrayWithAllocator; - - static E* allocate(int max, MemTag mem_tag) { - return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag); - } - - E* allocate() { - return allocate(this->_capacity, mtClass); - } - - void deallocate(E* mem) { -#if INCLUDE_CDS - AOTGrowableArrayHelper::deallocate(mem); -#else - GrowableArrayCHeapAllocator::deallocate(mem); -#endif - } - -public: - AOTGrowableArray(int initial_capacity, MemTag mem_tag) : - GrowableArrayWithAllocator( - allocate(initial_capacity, mem_tag), - initial_capacity) {} - - AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {} - - // methods required by MetaspaceClosure - void metaspace_pointers_do(MetaspaceClosure* it); - int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); } - MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; } - static bool is_read_only_by_default() { return false; } -}; - -#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp index 3653f9d518c..8129e6a5a81 100644 --- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp +++ 
b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -116,11 +116,24 @@ void AOTLinkedClassBulkLoader::preload_classes_in_table(Array* c } } +#ifdef ASSERT +// true iff we are inside AOTLinkedClassBulkLoader::link_classes(), when +// we are moving classes into the fully_initialized state before the +// JVM is able to execute any bytecodes. +static bool _is_initializing_classes_early = false; +bool AOTLinkedClassBulkLoader::is_initializing_classes_early() { + return _is_initializing_classes_early; +} +#endif + // Some cached heap objects may hold references to methods in aot-linked // classes (via MemberName). We need to make sure all classes are // linked before executing any bytecode. 
void AOTLinkedClassBulkLoader::link_classes(JavaThread* current) { + DEBUG_ONLY(_is_initializing_classes_early = true); link_classes_impl(current); + DEBUG_ONLY(_is_initializing_classes_early = false); + if (current->has_pending_exception()) { exit_on_exception(current); } @@ -135,6 +148,13 @@ void AOTLinkedClassBulkLoader::link_classes_impl(TRAPS) { link_classes_in_table(table->boot2(), CHECK); link_classes_in_table(table->platform(), CHECK); link_classes_in_table(table->app(), CHECK); + + init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), /*early_only=*/true, CHECK); + init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot2(), /*early_only=*/true, CHECK); + init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->platform(), /*early_only=*/true, CHECK); + init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->app(), /*early_only=*/true, CHECK); + + log_info(aot, init)("------ finished early class init"); } void AOTLinkedClassBulkLoader::link_classes_in_table(Array* classes, TRAPS) { @@ -216,7 +236,7 @@ void AOTLinkedClassBulkLoader::validate_module(Klass* k, const char* category_na #endif void AOTLinkedClassBulkLoader::init_javabase_classes(JavaThread* current) { - init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), current); + init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), /*early_only=*/false, current); if (current->has_pending_exception()) { exit_on_exception(current); } @@ -246,9 +266,9 @@ void AOTLinkedClassBulkLoader::init_non_javabase_classes_impl(TRAPS) { assert(h_system_loader() != nullptr, "must be"); AOTLinkedClassTable* table = AOTLinkedClassTable::get(); - init_classes_for_loader(Handle(), table->boot2(), CHECK); - init_classes_for_loader(h_platform_loader, table->platform(), CHECK); - init_classes_for_loader(h_system_loader, table->app(), CHECK); + init_classes_for_loader(Handle(), table->boot2(), /*early_only=*/false, CHECK); + 
init_classes_for_loader(h_platform_loader, table->platform(), /*early_only=*/false, CHECK); + init_classes_for_loader(h_system_loader, table->app(), /*early_only=*/false, CHECK); if (Universe::is_fully_initialized() && VerifyDuringStartup) { // Make sure we're still in a clean state. @@ -260,6 +280,10 @@ void AOTLinkedClassBulkLoader::init_non_javabase_classes_impl(TRAPS) { tty->print_cr("==================== archived_training_data ** after all classes preloaded ===================="); TrainingData::print_archived_training_data_on(tty); } + LogStreamHandle(Info, aot, training, data) log; + if (log.is_enabled()) { + TrainingData::print_archived_training_data_on(&log); + } } // For the AOT cache to function properly, all classes in the AOTLinkedClassTable @@ -324,22 +348,80 @@ void AOTLinkedClassBulkLoader::initiate_loading(JavaThread* current, const char* } } -// Some AOT-linked classes for must be initialized early. This includes -// - classes that were AOT-initialized by AOTClassInitializer -// - the classes of all objects that are reachable from the archived mirrors of -// the AOT-linked classes for . -void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array* classes, TRAPS) { +// Can we move ik into fully_initialized state before the JVM is able to execute +// bytecodes? +static bool is_early_init_possible(InstanceKlass* ik) { + if (ik->is_runtime_setup_required()) { + // Bytecodes need to be executed in order to initialize this class. 
+ if (log_is_enabled(Debug, aot, init)) { + ResourceMark rm; + log_debug(aot, init)("No early init %s: needs runtimeSetup()", + ik->external_name()); + } + return false; + } + + if (ik->super() != nullptr && !ik->super()->is_initialized()) { + // is_runtime_setup_required() == true for a super type + if (log_is_enabled(Debug, aot, init)) { + ResourceMark rm; + log_debug(aot, init)("No early init %s: super type %s not initialized", + ik->external_name(), ik->super()->external_name()); + } + return false; + } + + Array* interfaces = ik->local_interfaces(); + int num_interfaces = interfaces->length(); + for (int i = 0; i < num_interfaces; i++) { + InstanceKlass* intf = interfaces->at(i); + if (!intf->is_initialized() && intf->interface_needs_clinit_execution_as_super(/*also_check_supers*/false)) { + // is_runtime_setup_required() == true for this interface + if (log_is_enabled(Debug, aot, init)) { + ResourceMark rm; + log_debug(aot, init)("No early init %s: interface type %s not initialized", + ik->external_name(), intf->external_name()); + } + return false; + } + } + + return true; +} + +// Normally, classes are initialized on demand. However, some AOT-linked classes +// for the class_loader must be proactively intialized, including: +// - Classes that have an AOT-initialized mirror (they were AOT-initialized by +// AOTClassInitializer during the assembly phase). +// - The classes of all objects that are reachable from the archived mirrors of +// the AOT-linked classes for the class_loader. These are recorded in the special +// subgraph. +// +// (early_only == true) means that this function is called before the JVM +// is capable of executing Java bytecodes. 
+void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array* classes, + bool early_only, TRAPS) { if (classes != nullptr) { for (int i = 0; i < classes->length(); i++) { InstanceKlass* ik = classes->at(i); assert(ik->class_loader_data() != nullptr, "must be"); - if (ik->has_aot_initialized_mirror()) { - ik->initialize_with_aot_initialized_mirror(CHECK); + + bool do_init = ik->has_aot_initialized_mirror(); + if (do_init && early_only && !is_early_init_possible(ik)) { + // ik will be proactively initialized later when init_classes_for_loader() + // is called again with (early_only == false). + do_init = false; + } + + if (do_init) { + ik->initialize_with_aot_initialized_mirror(early_only, CHECK); } } } - HeapShared::init_classes_for_special_subgraph(class_loader, CHECK); + if (!early_only) { + HeapShared::init_classes_for_special_subgraph(class_loader, CHECK); + } } void AOTLinkedClassBulkLoader::replay_training_at_init(Array* classes, TRAPS) { diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp index 31fdac386fe..24ff61cea1e 100644 --- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp +++ b/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,7 @@ class AOTLinkedClassBulkLoader : AllStatic { static void link_classes_impl(TRAPS); static void link_classes_in_table(Array* classes, TRAPS); static void init_non_javabase_classes_impl(TRAPS); - static void init_classes_for_loader(Handle class_loader, Array* classes, TRAPS); + static void init_classes_for_loader(Handle class_loader, Array* classes, bool early_only, TRAPS); static void replay_training_at_init(Array* classes, TRAPS) NOT_CDS_RETURN; #ifdef ASSERT @@ -73,8 +73,9 @@ public: static void init_javabase_classes(JavaThread* current) NOT_CDS_RETURN; static void init_non_javabase_classes(JavaThread* current) NOT_CDS_RETURN; static void exit_on_exception(JavaThread* current); - static void replay_training_at_init_for_preloaded_classes(TRAPS) NOT_CDS_RETURN; + + static bool is_initializing_classes_early() NOT_DEBUG({return false;}); }; #endif // SHARE_CDS_AOTLINKEDCLASSBULKLOADER_HPP diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp index fa769aee1bf..9f338826fd6 100644 --- a/src/hotspot/share/cds/aotMapLogger.cpp +++ b/src/hotspot/share/cds/aotMapLogger.cpp @@ -98,8 +98,8 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo, DumpRegion* rw_region = &builder->_rw_region; DumpRegion* ro_region = &builder->_ro_region; - dumptime_log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs); - dumptime_log_metaspace_region("ro region", ro_region, &builder->_ro_src_objs); + dumptime_log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs, &builder->_ro_src_objs); + dumptime_log_metaspace_region("ro region", ro_region, &builder->_rw_src_objs, &builder->_ro_src_objs); address bitmap_end = address(bitmap + bitmap_size_in_bytes); log_region_range("bitmap", address(bitmap), bitmap_end, nullptr); @@ -122,17 +122,6 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo, 
class AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs : public UniqueMetaspaceClosure { GrowableArrayCHeap _objs; - static int compare_objs_by_addr(ArchivedObjInfo* a, ArchivedObjInfo* b) { - intx diff = a->_src_addr - b->_src_addr; - if (diff < 0) { - return -1; - } else if (diff == 0) { - return 0; - } else { - return 1; - } - } - public: GrowableArrayCHeap* objs() { return &_objs; } @@ -152,7 +141,7 @@ public: void finish() { UniqueMetaspaceClosure::finish(); - _objs.sort(compare_objs_by_addr); + _objs.sort(compare_by_address); } }; // AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs @@ -203,24 +192,47 @@ void AOTMapLogger::runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeapbase()); address region_top = address(region->top()); log_region_range(name, region_base, region_top, region_base + _buffer_to_requested_delta); if (log_is_enabled(Debug, aot, map)) { GrowableArrayCHeap objs; - for (int i = 0; i < src_objs->objs()->length(); i++) { - ArchiveBuilder::SourceObjInfo* src_info = src_objs->at(i); + // With -XX:+UseCompactObjectHeaders, it's possible for small objects (including some from + // ro_objs) to be allocated in the gaps in the RW region. 
+ collect_metaspace_objs(&objs, region_base, region_top, rw_objs); + collect_metaspace_objs(&objs, region_base, region_top, ro_objs); + objs.sort(compare_by_address); + log_metaspace_objects_impl(address(region->base()), address(region->end()), &objs, 0, objs.length()); + } +} + +void AOTMapLogger::collect_metaspace_objs(GrowableArrayCHeap* objs, + address region_base, address region_top , + const ArchiveBuilder::SourceObjList* src_objs) { + for (int i = 0; i < src_objs->objs()->length(); i++) { + ArchiveBuilder::SourceObjInfo* src_info = src_objs->at(i); + address buf_addr = src_info->buffered_addr(); + if (region_base <= buf_addr && buf_addr < region_top) { ArchivedObjInfo info; info._src_addr = src_info->source_addr(); - info._buffered_addr = src_info->buffered_addr(); + info._buffered_addr = buf_addr; info._requested_addr = info._buffered_addr + _buffer_to_requested_delta; info._bytes = src_info->size_in_bytes(); info._type = src_info->type(); - objs.append(info); + objs->append(info); } + } +} - log_metaspace_objects_impl(address(region->base()), address(region->end()), &objs, 0, objs.length()); +int AOTMapLogger::compare_by_address(ArchivedObjInfo* a, ArchivedObjInfo* b) { + if (a->_buffered_addr < b->_buffered_addr) { + return -1; + } else if (a->_buffered_addr > b->_buffered_addr) { + return 1; + } else { + return 0; } } @@ -577,7 +589,6 @@ public: } Klass* real_klass() { - assert(UseCompressedClassPointers, "heap archiving requires UseCompressedClassPointers"); return _data._klass; } diff --git a/src/hotspot/share/cds/aotMapLogger.hpp b/src/hotspot/share/cds/aotMapLogger.hpp index f495ed97f40..0a89f1e5012 100644 --- a/src/hotspot/share/cds/aotMapLogger.hpp +++ b/src/hotspot/share/cds/aotMapLogger.hpp @@ -127,7 +127,12 @@ private: static void runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeap* objs); static void runtime_log_metaspace_regions(FileMapInfo* mapinfo, GrowableArrayCHeap* objs); static void dumptime_log_metaspace_region(const char* name, 
DumpRegion* region, - const ArchiveBuilder::SourceObjList* src_objs); + const ArchiveBuilder::SourceObjList* rw_objs, + const ArchiveBuilder::SourceObjList* ro_objs); + static void collect_metaspace_objs(GrowableArrayCHeap* objs, + address region_base, address region_top , + const ArchiveBuilder::SourceObjList* src_objs); + static int compare_by_address(ArchivedObjInfo* a, ArchivedObjInfo* b); // Common code for dumptime/runtime static void log_file_header(FileMapInfo* mapinfo); diff --git a/src/hotspot/share/cds/aotMappedHeapLoader.hpp b/src/hotspot/share/cds/aotMappedHeapLoader.hpp index 7c5ca1b1f9e..10f5ce3124f 100644 --- a/src/hotspot/share/cds/aotMappedHeapLoader.hpp +++ b/src/hotspot/share/cds/aotMappedHeapLoader.hpp @@ -54,7 +54,7 @@ public: // Can this VM map archived heap region? Currently only G1+compressed{oops,cp} static bool can_map() { - CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedClassPointers);) + CDS_JAVA_HEAP_ONLY(return UseG1GC;) NOT_CDS_JAVA_HEAP(return false;) } diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.cpp b/src/hotspot/share/cds/aotMappedHeapWriter.cpp index 64c0e3c40e8..8f810ef5244 100644 --- a/src/hotspot/share/cds/aotMappedHeapWriter.cpp +++ b/src/hotspot/share/cds/aotMappedHeapWriter.cpp @@ -64,6 +64,11 @@ HeapRootSegments AOTMappedHeapWriter::_heap_root_segments; address AOTMappedHeapWriter::_requested_bottom; address AOTMappedHeapWriter::_requested_top; +static size_t _num_strings = 0; +static size_t _string_bytes = 0; +static size_t _num_packages = 0; +static size_t _num_protection_domains = 0; + GrowableArrayCHeap* AOTMappedHeapWriter::_native_pointers; GrowableArrayCHeap* AOTMappedHeapWriter::_source_objs; GrowableArrayCHeap* AOTMappedHeapWriter::_source_objs_order; @@ -71,8 +76,6 @@ GrowableArrayCHeap* AOTMappedH AOTMappedHeapWriter::BufferOffsetToSourceObjectTable* AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr; -DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr; - 
typedef HashTable< size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom() size_t, // size of this filler (in bytes) @@ -87,7 +90,6 @@ void AOTMappedHeapWriter::init() { Universe::heap()->collect(GCCause::_java_lang_system_gc); _buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M); - _dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE); _fillers = new (mtClassShared) FillersTable(); _requested_bottom = nullptr; _requested_top = nullptr; @@ -141,9 +143,6 @@ int AOTMappedHeapWriter::narrow_oop_shift() { void AOTMappedHeapWriter::delete_tables_with_raw_oops() { delete _source_objs; _source_objs = nullptr; - - delete _dumped_interned_strings; - _dumped_interned_strings = nullptr; } void AOTMappedHeapWriter::add_source_obj(oop src_obj) { @@ -181,25 +180,6 @@ bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) { } } -// Keep track of the contents of the archived interned string table. This table -// is used only by CDSHeapVerifier. -void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) { - assert_at_safepoint(); // DumpedInternedStrings uses raw oops - assert(!is_string_too_large_to_archive(string), "must be"); - bool created; - _dumped_interned_strings->put_if_absent(string, true, &created); - if (created) { - // Prevent string deduplication from changing the value field to - // something not in the archive. 
- java_lang_String::set_deduplication_forbidden(string); - _dumped_interned_strings->maybe_grow(); - } -} - -bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) { - return _dumped_interned_strings->get(o) != nullptr; -} - // Various lookup functions between source_obj, buffered_obj and requested_obj bool AOTMappedHeapWriter::is_in_requested_range(oop o) { assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized"); @@ -430,6 +410,7 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeapset_buffer_offset(buffer_offset); + assert(buffer_offset <= 0x7fffffff, "sanity"); OopHandle handle(Universe::vm_global(), src_obj); _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle); @@ -442,6 +423,9 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeaplength() + 1, roots->length(), _num_native_ptrs); + log_info(aot)(" strings = %8zu (%zu bytes)", _num_strings, _string_bytes); + log_info(aot)(" packages = %8zu", _num_packages); + log_info(aot)(" protection domains = %8zu", _num_protection_domains); } size_t AOTMappedHeapWriter::filler_array_byte_size(int length) { @@ -466,7 +450,6 @@ int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) { } HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) { - assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses"); Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass HeapWord* mem = offset_to_buffered_address(_buffer_used); memset(mem, 0, fill_bytes); @@ -530,7 +513,25 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu *field_addr = value; } +void AOTMappedHeapWriter::update_stats(oop src_obj) { + if (java_lang_String::is_instance(src_obj)) { + _num_strings ++; + _string_bytes += src_obj->size() * HeapWordSize; + _string_bytes += java_lang_String::value(src_obj)->size() * 
HeapWordSize; + } else { + Klass* k = src_obj->klass(); + Symbol* name = k->name(); + if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) { + _num_packages ++; + } else if (name->equals("java/security/ProtectionDomain")) { + _num_protection_domains ++; + } + } +} + size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) { + update_stats(src_obj); + assert(!is_too_large_to_archive(src_obj), "already checked"); size_t byte_size = src_obj->size() * HeapWordSize; assert(byte_size > 0, "no zero-size objects"); @@ -722,7 +723,6 @@ template void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_add } void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) { - assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses"); narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass); address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop
(requested_obj)); @@ -896,8 +896,14 @@ void AOTMappedHeapWriter::compute_ptrmap(AOTMappedHeapInfo* heap_info) { native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr); } - guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr), - "Metadata %p should have been archived", native_ptr); + if (!ArchiveBuilder::current()->has_been_archived((address)native_ptr)) { + ResourceMark rm; + LogStreamHandle(Error, aot) log; + log.print("Marking native pointer for oop %p (type = %s, offset = %d)", + cast_from_oop(src_obj), src_obj->klass()->external_name(), field_offset); + src_obj->print_on(&log); + fatal("Metadata %p should have been archived", native_ptr); + } address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr); address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr); diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.hpp b/src/hotspot/share/cds/aotMappedHeapWriter.hpp index 7481e7922a0..2420e68d9fe 100644 --- a/src/hotspot/share/cds/aotMappedHeapWriter.hpp +++ b/src/hotspot/share/cds/aotMappedHeapWriter.hpp @@ -40,20 +40,6 @@ class MemRegion; #if INCLUDE_CDS_JAVA_HEAP -class DumpedInternedStrings : - public ResizeableHashTable -{ -public: - DumpedInternedStrings(unsigned size, unsigned max_size) : - ResizeableHashTable(size, max_size) {} -}; - class AOTMappedHeapWriter : AllStatic { friend class HeapShared; friend class AOTMappedHeapLoader; @@ -131,7 +117,6 @@ private: static GrowableArrayCHeap* _native_pointers; static GrowableArrayCHeap* _source_objs; - static DumpedInternedStrings *_dumped_interned_strings; // We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap. // See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields(). 
@@ -190,6 +175,7 @@ private: static void copy_roots_to_buffer(GrowableArrayCHeap* roots); static void copy_source_objs_to_buffer(GrowableArrayCHeap* roots); static size_t copy_one_source_obj_to_buffer(oop src_obj); + static void update_stats(oop src_obj); static void maybe_fill_gc_region_gap(size_t required_byte_size); static size_t filler_array_byte_size(int length); @@ -227,8 +213,6 @@ public: static bool is_too_large_to_archive(size_t size); static bool is_too_large_to_archive(oop obj); static bool is_string_too_large_to_archive(oop string); - static bool is_dumped_interned_string(oop o); - static void add_to_dumped_interned_strings(oop string); static void write(GrowableArrayCHeap*, AOTMappedHeapInfo* heap_info); static address requested_address(); // requested address of the lowest achived heap object static size_t get_filler_size_at(address buffered_addr); diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp index b75d7628aa9..4c23ede9cb8 100644 --- a/src/hotspot/share/cds/aotMetaspace.cpp +++ b/src/hotspot/share/cds/aotMetaspace.cpp @@ -250,9 +250,9 @@ static bool shared_base_too_high(char* specified_base, char* aligned_base, size_ static char* compute_shared_base(size_t cds_max) { char* specified_base = (char*)SharedBaseAddress; size_t alignment = AOTMetaspace::core_region_alignment(); - if (UseCompressedClassPointers && CompressedKlassPointers::needs_class_space()) { - alignment = MAX2(alignment, Metaspace::reserve_alignment()); - } +#if INCLUDE_CLASS_SPACE + alignment = MAX2(alignment, Metaspace::reserve_alignment()); +#endif if (SharedBaseAddress == 0) { // Special meaning of -XX:SharedBaseAddress=0 -> Always map archive at os-selected address. 
@@ -949,11 +949,18 @@ void AOTMetaspace::dump_static_archive(TRAPS) { ResourceMark rm(THREAD); HandleMark hm(THREAD); - if (CDSConfig::is_dumping_final_static_archive() && AOTPrintTrainingInfo) { - tty->print_cr("==================== archived_training_data ** before dumping ===================="); - TrainingData::print_archived_training_data_on(tty); + if (CDSConfig::is_dumping_final_static_archive()) { + if (AOTPrintTrainingInfo) { + tty->print_cr("==================== archived_training_data ** before dumping ===================="); + TrainingData::print_archived_training_data_on(tty); + } + LogStreamHandle(Info, aot, training, data) log; + if (log.is_enabled()) { + TrainingData::print_archived_training_data_on(&log); + } } + StaticArchiveBuilder builder; dump_static_archive_impl(builder, THREAD); if (HAS_PENDING_EXCEPTION) { @@ -1187,8 +1194,8 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS CDSConfig::enable_dumping_aot_code(); { builder.start_ac_region(); - // Write the contents to AOT code region and close AOTCodeCache before packing the region - AOTCodeCache::close(); + // Write the contents to AOT code region before packing the region + AOTCodeCache::dump(); builder.end_ac_region(); } CDSConfig::disable_dumping_aot_code(); @@ -1637,32 +1644,29 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap aot_log_debug(aot)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr); } else { - if (Metaspace::using_class_space()) { - prot_zone_size = protection_zone_size(); - } + CLASS_SPACE_ONLY(prot_zone_size = protection_zone_size();) -#ifdef ASSERT // Some sanity checks after reserving address spaces for archives // and class space. 
assert(archive_space_rs.is_reserved(), "Sanity"); - if (Metaspace::using_class_space()) { - assert(archive_space_rs.base() == mapped_base_address && - archive_space_rs.size() > protection_zone_size(), - "Archive space must lead and include the protection zone"); - // Class space must closely follow the archive space. Both spaces - // must be aligned correctly. - assert(class_space_rs.is_reserved() && class_space_rs.size() > 0, - "A class space should have been reserved"); - assert(class_space_rs.base() >= archive_space_rs.end(), - "class space should follow the cds archive space"); - assert(is_aligned(archive_space_rs.base(), - core_region_alignment()), - "Archive space misaligned"); - assert(is_aligned(class_space_rs.base(), - Metaspace::reserve_alignment()), - "class space misaligned"); - } -#endif // ASSERT + +#if INCLUDE_CLASS_SPACE + assert(archive_space_rs.base() == mapped_base_address && + archive_space_rs.size() > protection_zone_size(), + "Archive space must lead and include the protection zone"); + // Class space must closely follow the archive space. Both spaces + // must be aligned correctly. 
+ assert(class_space_rs.is_reserved() && class_space_rs.size() > 0, + "A class space should have been reserved"); + assert(class_space_rs.base() >= archive_space_rs.end(), + "class space should follow the cds archive space"); + assert(is_aligned(archive_space_rs.base(), + core_region_alignment()), + "Archive space misaligned"); + assert(is_aligned(class_space_rs.base(), + Metaspace::reserve_alignment()), + "class space misaligned"); +#endif // INCLUDE_CLASS_SPACE aot_log_info(aot)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes%s", p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size(), @@ -1764,67 +1768,60 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap if (result == MAP_ARCHIVE_SUCCESS) { SharedBaseAddress = (size_t)mapped_base_address; -#ifdef _LP64 - if (Metaspace::using_class_space()) { - assert(prot_zone_size > 0 && - *(mapped_base_address) == 'P' && - *(mapped_base_address + prot_zone_size - 1) == 'P', - "Protection zone was overwritten?"); - // Set up ccs in metaspace. - Metaspace::initialize_class_space(class_space_rs); +#if INCLUDE_CLASS_SPACE + assert(prot_zone_size > 0 && + *(mapped_base_address) == 'P' && + *(mapped_base_address + prot_zone_size - 1) == 'P', + "Protection zone was overwritten?"); + // Set up ccs in metaspace. + Metaspace::initialize_class_space(class_space_rs); - // Set up compressed Klass pointer encoding: the encoding range must - // cover both archive and class space. 
- const address klass_range_start = (address)mapped_base_address; - const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start; - if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) { - // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time: - // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP) - // - every archived Klass' prototype (only if +UseCompactObjectHeaders) - // - // In order for those IDs to still be valid, we need to dictate base and shift: base should be the - // mapping start (including protection zone), shift should be the shift used at archive generation time. - CompressedKlassPointers::initialize_for_given_encoding( - klass_range_start, klass_range_size, - klass_range_start, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder - ); - assert(CompressedKlassPointers::base() == klass_range_start, "must be"); - } else { - // Let JVM freely choose encoding base and shift - CompressedKlassPointers::initialize(klass_range_start, klass_range_size); - assert(CompressedKlassPointers::base() == nullptr || - CompressedKlassPointers::base() == klass_range_start, "must be"); - } - // Establish protection zone, but only if we need one - if (CompressedKlassPointers::base() == klass_range_start) { - CompressedKlassPointers::establish_protection_zone(klass_range_start, prot_zone_size); - } + // Set up compressed Klass pointer encoding: the encoding range must + // cover both archive and class space. 
+ const address klass_range_start = (address)mapped_base_address; + const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start; + if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) { + // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time: + // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP) + // - every archived Klass' prototype (only if +UseCompactObjectHeaders) + // + // In order for those IDs to still be valid, we need to dictate base and shift: base should be the + // mapping start (including protection zone), shift should be the shift used at archive generation time. + CompressedKlassPointers::initialize_for_given_encoding( + klass_range_start, klass_range_size, + klass_range_start, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder + ); + assert(CompressedKlassPointers::base() == klass_range_start, "must be"); + } else { + // Let JVM freely choose encoding base and shift + CompressedKlassPointers::initialize(klass_range_start, klass_range_size); + assert(CompressedKlassPointers::base() == nullptr || + CompressedKlassPointers::base() == klass_range_start, "must be"); + } + // Establish protection zone, but only if we need one + if (CompressedKlassPointers::base() == klass_range_start) { + CompressedKlassPointers::establish_protection_zone(klass_range_start, prot_zone_size); + } - if (static_mapinfo->can_use_heap_region()) { - if (static_mapinfo->object_streaming_mode()) { - HeapShared::initialize_loading_mode(HeapArchiveMode::_streaming); - } else { - // map_or_load_heap_region() compares the current narrow oop and klass encodings - // with the archived ones, so it must be done after all encodings are determined. 
- static_mapinfo->map_or_load_heap_region(); - HeapShared::initialize_loading_mode(HeapArchiveMode::_mapping); - } + if (static_mapinfo->can_use_heap_region()) { + if (static_mapinfo->object_streaming_mode()) { + HeapShared::initialize_loading_mode(HeapArchiveMode::_streaming); } else { - FileMapRegion* r = static_mapinfo->region_at(AOTMetaspace::hp); - if (r->used() > 0) { - if (static_mapinfo->object_streaming_mode()) { - AOTMetaspace::report_loading_error("Cannot use CDS heap data."); - } else { - if (!UseCompressedOops && !AOTMappedHeapLoader::can_map()) { - AOTMetaspace::report_loading_error("Cannot use CDS heap data. Selected GC not compatible -XX:-UseCompressedOops"); - } else { - AOTMetaspace::report_loading_error("Cannot use CDS heap data. UseEpsilonGC, UseG1GC, UseSerialGC, UseParallelGC, or UseShenandoahGC are required."); - } - } - } + // map_or_load_heap_region() compares the current narrow oop and klass encodings + // with the archived ones, so it must be done after all encodings are determined. + static_mapinfo->map_or_load_heap_region(); + HeapShared::initialize_loading_mode(HeapArchiveMode::_mapping); + } + } else { + FileMapRegion* r = static_mapinfo->region_at(AOTMetaspace::hp); + if (r->used() > 0) { + AOTMetaspace::report_loading_error("Cannot use CDS heap data."); + } + if (!CDSConfig::is_dumping_static_archive()) { + CDSConfig::stop_using_full_module_graph("No CDS heap data"); } } -#endif // _LP64 +#endif // INCLUDE_CLASS_SPACE log_info(aot)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled"); log_info(aot)("initial full module graph: %s", CDSConfig::is_using_full_module_graph() ? 
"enabled" : "disabled"); } else { @@ -1857,8 +1854,13 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap // (The gap may result from different alignment requirements between metaspace // and CDS) // -// If UseCompressedClassPointers is disabled, only one address space will be -// reserved: +// The range encompassing both spaces will be suitable to en/decode narrow Klass +// pointers: the base will be valid for encoding the range [Base, End) and not +// surpass the max. range for that encoding. +// +// On 32-bit, a "narrow" Klass is just the pointer itself, and the Klass encoding +// range encompasses the whole address range. Consequently, we can "decode" and +// "encode" any pointer anywhere, and so are free to place the CDS archive anywhere: // // +-- Base address End // | | @@ -1872,27 +1874,21 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap // use_archive_base_addr address is false, this base address is determined // by the platform. // -// If UseCompressedClassPointers=1, the range encompassing both spaces will be -// suitable to en/decode narrow Klass pointers: the base will be valid for -// encoding, the range [Base, End) and not surpass the max. range for that encoding. -// // Return: // // - On success: // - total_space_rs will be reserved as whole for archive_space_rs and -// class_space_rs if UseCompressedClassPointers is true. +// class_space_rs on 64-bit. // On Windows, try reserve archive_space_rs and class_space_rs // separately first if use_archive_base_addr is true. // - archive_space_rs will be reserved and large enough to host static and // if needed dynamic archive: [Base, A). // archive_space_rs.base and size will be aligned to CDS reserve // granularity. -// - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will -// be reserved. Its start address will be aligned to metaspace reserve -// alignment, which may differ from CDS alignment. 
It will follow the cds -// archive space, close enough such that narrow class pointer encoding -// covers both spaces. -// If UseCompressedClassPointers=0, class_space_rs remains unreserved. +// - class_space_rs: On 64-bit, class_space_rs will be reserved. Its start +// address will be aligned to metaspace reserve alignment, which may differ +// from CDS alignment. It will follow the cds archive space, close enough +// such that narrow class pointer encoding covers both spaces. // - On error: null is returned and the spaces remain unreserved. char* AOTMetaspace::reserve_address_space_for_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo, @@ -1908,32 +1904,34 @@ char* AOTMetaspace::reserve_address_space_for_archives(FileMapInfo* static_mapin size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset(); size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment); - if (!Metaspace::using_class_space()) { - // Get the simple case out of the way first: - // no compressed class space, simple allocation. +#if !INCLUDE_CLASS_SPACE - // When running without class space, requested archive base should be aligned to cds core alignment. - assert(is_aligned(base_address, archive_space_alignment), - "Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.", - p2i(base_address), archive_space_alignment); + // Get the simple case out of the way first: + // no compressed class space, simple allocation. - archive_space_rs = MemoryReserver::reserve((char*)base_address, - archive_space_size, - archive_space_alignment, - os::vm_page_size(), - mtNone); - if (archive_space_rs.is_reserved()) { - assert(base_address == nullptr || - (address)archive_space_rs.base() == base_address, "Sanity"); - // Register archive space with NMT. 
- MemTracker::record_virtual_memory_tag(archive_space_rs, mtClassShared); - return archive_space_rs.base(); - } - return nullptr; + // When running without class space, requested archive base should be aligned to cds core alignment. + assert(is_aligned(base_address, archive_space_alignment), + "Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.", + p2i(base_address), archive_space_alignment); + + archive_space_rs = MemoryReserver::reserve((char*)base_address, + archive_space_size, + archive_space_alignment, + os::vm_page_size(), + mtNone); + if (archive_space_rs.is_reserved()) { + assert(base_address == nullptr || + (address)archive_space_rs.base() == base_address, "Sanity"); + // Register archive space with NMT. + MemTracker::record_virtual_memory_tag(archive_space_rs, mtClassShared); + return archive_space_rs.base(); } -#ifdef _LP64 + return nullptr; +#else + + // INCLUDE_CLASS_SPACE=1 // Complex case: two spaces adjacent to each other, both to be addressable // with narrow class pointers. // We reserve the whole range spanning both spaces, then split that range up. 
@@ -2045,11 +2043,7 @@ char* AOTMetaspace::reserve_address_space_for_archives(FileMapInfo* static_mapin return archive_space_rs.base(); -#else - ShouldNotReachHere(); - return nullptr; -#endif - +#endif // INCLUDE_CLASS_SPACE } void AOTMetaspace::release_reserved_spaces(ReservedSpace& total_space_rs, diff --git a/src/hotspot/share/cds/aotReferenceObjSupport.cpp b/src/hotspot/share/cds/aotReferenceObjSupport.cpp index 0c27c8ce5f0..2d5fc8c7f21 100644 --- a/src/hotspot/share/cds/aotReferenceObjSupport.cpp +++ b/src/hotspot/share/cds/aotReferenceObjSupport.cpp @@ -96,7 +96,7 @@ class KeepAliveObjectsTable : public HashTable {}; + HeapShared::oop_address_hash> {}; static KeepAliveObjectsTable* _keep_alive_objs_table; static OopHandle _keep_alive_objs_array; diff --git a/src/hotspot/share/cds/aotStreamedHeapWriter.cpp b/src/hotspot/share/cds/aotStreamedHeapWriter.cpp index f52532b2f2a..25bef10a673 100644 --- a/src/hotspot/share/cds/aotStreamedHeapWriter.cpp +++ b/src/hotspot/share/cds/aotStreamedHeapWriter.cpp @@ -242,20 +242,6 @@ void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) { } } -static bool is_interned_string(oop obj) { - if (!java_lang_String::is_instance(obj)) { - return false; - } - - ResourceMark rm; - int len; - jchar* name = java_lang_String::as_unicode_string_or_null(obj, len); - if (name == nullptr) { - fatal("Insufficient memory for dumping"); - } - return StringTable::lookup(name, len) == obj; -} - static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) { if (UseCompressedOops) { return BitMap::idx_t(buffer_offset / sizeof(narrowOop)); @@ -264,10 +250,6 @@ static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) { } } -bool AOTStreamedHeapWriter::is_dumped_interned_string(oop obj) { - return is_interned_string(obj) && HeapShared::get_cached_oop_info(obj) != nullptr; -} - void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap* roots) { for (int i = 0; i < _source_objs->length(); i++) { 
oop src_obj = _source_objs->at(i); @@ -325,7 +307,7 @@ size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) { ensure_buffer_space(new_used); - if (is_interned_string(src_obj)) { + if (HeapShared::is_interned_string(src_obj)) { java_lang_String::hash_code(src_obj); // Sets the hash code field(s) java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime assert(java_lang_String::hash_is_set(src_obj), "hash must be set"); @@ -387,7 +369,6 @@ template void AOTStreamedHeapWriter::map_oop_field_in_buffer(oop ob } void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) { - assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses"); narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass); markWord mw = markWord::prototype(); @@ -402,7 +383,7 @@ void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_add mw = mw.copy_set_hash(src_hash); } - if (is_interned_string(src_obj)) { + if (HeapShared::is_interned_string(src_obj)) { // Mark the mark word of interned string so the loader knows to link these to // the string table at runtime. 
mw = mw.set_marked(); diff --git a/src/hotspot/share/cds/aotStreamedHeapWriter.hpp b/src/hotspot/share/cds/aotStreamedHeapWriter.hpp index ab5aec0327b..c3cc9f2c092 100644 --- a/src/hotspot/share/cds/aotStreamedHeapWriter.hpp +++ b/src/hotspot/share/cds/aotStreamedHeapWriter.hpp @@ -148,8 +148,6 @@ public: return size_t(buffered_addr) - size_t(buffer_bottom()); } - static bool is_dumped_interned_string(oop obj); - static size_t source_obj_to_buffered_offset(oop src_obj); static address source_obj_to_buffered_addr(oop src_obj); diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index 0ea5d6c6ecb..21eef3d7b0b 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -627,6 +627,7 @@ void ArchiveBuilder::dump_ro_metadata() { start_dump_region(&_ro_region); make_shallow_copies(&_ro_region, &_ro_src_objs); RegeneratedClasses::record_regenerated_objects(); + DumpRegion::report_gaps(&_alloc_stats); } void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region, @@ -639,33 +640,10 @@ void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region, void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) { address src = src_info->source_addr(); - int bytes = src_info->size_in_bytes(); // word-aligned - size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer + int bytes = src_info->size_in_bytes(); + char* dest = dump_region->allocate_metaspace_obj(bytes, src, src_info->type(), + src_info->read_only(), &_alloc_stats); - char* oldtop = dump_region->top(); - if (src_info->type() == MetaspaceClosureType::ClassType) { - // Allocate space for a pointer directly in front of the future InstanceKlass, so - // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo* - // without building another hashtable. See RunTimeClassInfo::get_for() - // in systemDictionaryShared.cpp. 
- Klass* klass = (Klass*)src; - if (klass->is_instance_klass()) { - SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); - dump_region->allocate(sizeof(address)); - } -#ifdef _LP64 - // More strict alignments needed for UseCompressedClassPointers - if (UseCompressedClassPointers) { - alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()); - } -#endif - } else if (src_info->type() == MetaspaceClosureType::SymbolType) { - // Symbols may be allocated by using AllocateHeap, so their sizes - // may be less than size_in_bytes() indicates. - bytes = ((Symbol*)src)->byte_size(); - } - - char* dest = dump_region->allocate(bytes, alignment); memcpy(dest, src, bytes); // Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents @@ -692,11 +670,6 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s log_trace(aot)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes); src_info->set_buffered_addr((address)dest); - - char* newtop = dump_region->top(); - _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only()); - - DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only())); } // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are @@ -1119,20 +1092,17 @@ class RelocateBufferToRequested : public BitMapClosure { } }; -#ifdef _LP64 int ArchiveBuilder::precomputed_narrow_klass_shift() { - // Legacy Mode: - // We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0. + // Standard Mode: + // We use 32 bits for narrowKlass, which should cover a full 4G Klass range. Shift can be 0. // CompactObjectHeader Mode: // narrowKlass is much smaller, and we use the highest possible shift value to later get the maximum // Klass encoding range. 
// // Note that all of this may change in the future, if we decide to correct the pre-calculated // narrow Klass IDs at archive load time. - assert(UseCompressedClassPointers, "Only needed for compressed class pointers"); return UseCompactObjectHeaders ? CompressedKlassPointers::max_shift() : 0; } -#endif // _LP64 void ArchiveBuilder::relocate_to_requested() { if (!ro_region()->is_packed()) { diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index b3667ea11b4..6a9df87092b 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -484,7 +484,6 @@ public: void print_stats(); void report_out_of_space(const char* name, size_t needed_bytes); -#ifdef _LP64 // The CDS archive contains pre-computed narrow Klass IDs. It carries them in the headers of // archived heap objects. With +UseCompactObjectHeaders, it also carries them in prototypes // in Klass. @@ -504,7 +503,6 @@ public: // TinyClassPointer Mode: // We use the highest possible shift value to maximize the encoding range size. 
static int precomputed_narrow_klass_shift(); -#endif // _LP64 }; diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp index ea9bde8eb8d..6e0608e196b 100644 --- a/src/hotspot/share/cds/archiveUtils.cpp +++ b/src/hotspot/share/cds/archiveUtils.cpp @@ -30,6 +30,7 @@ #include "cds/cdsConfig.hpp" #include "cds/classListParser.hpp" #include "cds/classListWriter.hpp" +#include "cds/dumpAllocStats.hpp" #include "cds/dynamicArchive.hpp" #include "cds/filemap.hpp" #include "cds/heapShared.hpp" @@ -46,6 +47,7 @@ #include "utilities/debug.hpp" #include "utilities/formatBuffer.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/rbTree.inline.hpp" #include "utilities/spinYield.hpp" CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr; @@ -116,13 +118,17 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) { if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) { address value = *ptr_loc; - // We don't want any pointer that points to very bottom of the archive, otherwise when - // AOTMetaspace::default_base_address()==0, we can't distinguish between a pointer - // to nothing (null) vs a pointer to an objects that happens to be at the very bottom - // of the archive. - assert(value != (address)ptr_base(), "don't point to the bottom of the archive"); - if (value != nullptr) { + // We don't want any pointer that points to the very bottom of the AOT metaspace, otherwise + // when AOTMetaspace::default_base_address()==0, we can't distinguish between a pointer + // to nothing (null) vs a pointer to an object that happens to be at the very bottom + // of the AOT metaspace. + // + // This should never happen because the protection zone prevents any valid objects from + // being allocated at the bottom of the AOT metaspace.
+ assert(AOTMetaspace::protection_zone_size() > 0, "must be"); + assert(ArchiveBuilder::current()->any_to_offset(value) > 0, "cannot point to bottom of AOT metaspace"); + assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses"); size_t idx = ptr_loc - ptr_base(); if (_ptrmap->size() <= idx) { @@ -130,7 +136,6 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) { } assert(idx < _ptrmap->size(), "must be"); _ptrmap->set_bit(idx); - //tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx); } } } @@ -144,7 +149,6 @@ void ArchivePtrMarker::clear_pointer(address* ptr_loc) { size_t idx = ptr_loc - ptr_base(); assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked"); _ptrmap->clear_bit(idx); - //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx); } class ArchivePtrBitmapCleaner: public BitMapClosure { @@ -249,16 +253,179 @@ void DumpRegion::commit_to(char* newtop) { which, commit, _vs->actual_committed_size(), _vs->high()); } +// Basic allocation. Any alignment gaps will be wasted. 
char* DumpRegion::allocate(size_t num_bytes, size_t alignment) { // Always align to at least minimum alignment alignment = MAX2(SharedSpaceObjectAlignment, alignment); char* p = (char*)align_up(_top, alignment); - char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment); + char* newtop = p + align_up(num_bytes, SharedSpaceObjectAlignment); expand_top_to(newtop); memset(p, 0, newtop - p); return p; } +class DumpRegion::AllocGap { + size_t _gap_bytes; // size of this gap in bytes + char* _gap_bottom; // must be SharedSpaceObjectAlignment aligned +public: + size_t gap_bytes() const { return _gap_bytes; } + char* gap_bottom() const { return _gap_bottom; } + + AllocGap(size_t bytes, char* bottom) : _gap_bytes(bytes), _gap_bottom(bottom) { + precond(is_aligned(gap_bytes(), SharedSpaceObjectAlignment)); + precond(is_aligned(gap_bottom(), SharedSpaceObjectAlignment)); + } +}; + +struct DumpRegion::AllocGapCmp { + static RBTreeOrdering cmp(AllocGap a, AllocGap b) { + RBTreeOrdering order = rbtree_primitive_cmp(a.gap_bytes(), b.gap_bytes()); + if (order == RBTreeOrdering::EQ) { + order = rbtree_primitive_cmp(a.gap_bottom(), b.gap_bottom()); + } + return order; + } +}; + +struct Empty {}; +using AllocGapNode = RBNode; + +class DumpRegion::AllocGapTree : public RBTreeCHeap { +public: + size_t add_gap(char* gap_bottom, char* gap_top) { + precond(gap_bottom < gap_top); + size_t gap_bytes = pointer_delta(gap_top, gap_bottom, 1); + precond(gap_bytes > 0); + + _total_gap_bytes += gap_bytes; + + AllocGap gap(gap_bytes, gap_bottom); // constructor checks alignment + AllocGapNode* node = allocate_node(gap, Empty{}); + insert(gap, node); + + log_trace(aot, alloc)("adding a gap of %zu bytes @ %p (total = %zu) in %zu blocks", gap_bytes, gap_bottom, _total_gap_bytes, size()); + return gap_bytes; + } + + char* allocate_from_gap(size_t num_bytes) { + // The gaps are sorted in ascending order of their sizes. 
When two gaps have the same + // size, the one with a lower gap_bottom comes first. + // + // Find the first gap that's big enough, with the lowest gap_bottom. + AllocGap target(num_bytes, nullptr); + AllocGapNode* node = closest_ge(target); + if (node == nullptr) { + return nullptr; // Didn't find any usable gap. + } + + size_t gap_bytes = node->key().gap_bytes(); + char* gap_bottom = node->key().gap_bottom(); + char* result = gap_bottom; + precond(is_aligned(result, SharedSpaceObjectAlignment)); + + remove(node); + + precond(_total_gap_bytes >= num_bytes); + _total_gap_bytes -= num_bytes; + _total_gap_bytes_used += num_bytes; + _total_gap_allocs++; + DEBUG_ONLY(node = nullptr); // Don't use it anymore! + + precond(gap_bytes >= num_bytes); + if (gap_bytes > num_bytes) { + gap_bytes -= num_bytes; + gap_bottom += num_bytes; + + AllocGap gap(gap_bytes, gap_bottom); // constructor checks alignment + AllocGapNode* new_node = allocate_node(gap, Empty{}); + insert(gap, new_node); + } + log_trace(aot, alloc)("%zu bytes @ %p in a gap of %zu bytes (used gaps %zu times, remain gap = %zu bytes in %zu blocks)", + num_bytes, result, gap_bytes, _total_gap_allocs, _total_gap_bytes, size()); + return result; + } +}; + +size_t DumpRegion::_total_gap_bytes = 0; +size_t DumpRegion::_total_gap_bytes_used = 0; +size_t DumpRegion::_total_gap_allocs = 0; +DumpRegion::AllocGapTree DumpRegion::_gap_tree; + +// Alignment gaps happen only for the RW space. Collect the gaps into the _gap_tree so they can be +// used for future small object allocation. 
+char* DumpRegion::allocate_metaspace_obj(size_t num_bytes, address src, MetaspaceClosureType type, bool read_only, DumpAllocStats* stats) { + num_bytes = align_up(num_bytes, SharedSpaceObjectAlignment); + size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer + bool is_class = (type == MetaspaceClosureType::ClassType); + bool is_instance_class = is_class && ((Klass*)src)->is_instance_klass(); + +#ifdef _LP64 + // More strict alignments needed for Klass objects + if (is_class) { + size_t klass_alignment = checked_cast(nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift())); + alignment = MAX2(alignment, klass_alignment); + precond(is_aligned(alignment, SharedSpaceObjectAlignment)); + } +#endif + + if (alignment == SharedSpaceObjectAlignment && type != MetaspaceClosureType::SymbolType) { + // The addresses of Symbols must be in the same order as they are in ArchiveBuilder::SourceObjList. + // If we put them in gaps, their order will change. + // + // We have enough small objects that all gaps are usually filled. + char* p = _gap_tree.allocate_from_gap(num_bytes); + if (p != nullptr) { + // Already memset to 0 when adding the gap + stats->record(type, checked_cast(num_bytes), /*read_only=*/false); // all gaps are from RW space (for classes) + return p; + } + } + + // Reserve space for a pointer directly in front of the buffered InstanceKlass, so + // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo* + // without building another hashtable. See RunTimeClassInfo::get_for() + // in systemDictionaryShared.cpp. + const size_t RuntimeClassInfoPtrSize = is_instance_class ? sizeof(address) : 0; + + if (is_class && !is_aligned(top() + RuntimeClassInfoPtrSize, alignment)) { + // We need to add a gap to align the buffered Klass. Save the gap for future small allocations. 
+ assert(read_only == false, "only gaps in RW region are reusable"); + char* gap_bottom = top(); + char* gap_top = align_up(gap_bottom + RuntimeClassInfoPtrSize, alignment) - RuntimeClassInfoPtrSize; + size_t gap_bytes = _gap_tree.add_gap(gap_bottom, gap_top); + allocate(gap_bytes); + } + + char* oldtop = top(); + if (is_instance_class) { + SystemDictionaryShared::validate_before_archiving((InstanceKlass*)src); + allocate(RuntimeClassInfoPtrSize); + } + + precond(is_aligned(top(), alignment)); + char* result = allocate(num_bytes); + log_trace(aot, alloc)("%zu bytes @ %p", num_bytes, result); + stats->record(type, pointer_delta_as_int(top(), oldtop), read_only); // includes RuntimeClassInfoPtrSize for classes + + return result; +} + +// Usually we have no gaps left. +void DumpRegion::report_gaps(DumpAllocStats* stats) { + _gap_tree.visit_in_order([&](const AllocGapNode* node) { + stats->record_gap(checked_cast(node->key().gap_bytes())); + return true; + }); + if (_gap_tree.size() > 0) { + log_warning(aot)("Unexpected %zu gaps (%zu bytes) for Klass alignment", + _gap_tree.size(), _total_gap_bytes); + } + if (_total_gap_allocs > 0) { + log_info(aot)("Allocated %zu objects of %zu bytes in gaps (remain = %zu bytes)", + _total_gap_allocs, _total_gap_bytes_used, _total_gap_bytes); + } +} + void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) { assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment"); intptr_t *p = (intptr_t*)_top; diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp index e5d1efa5eab..42455adedd0 100644 --- a/src/hotspot/share/cds/archiveUtils.hpp +++ b/src/hotspot/share/cds/archiveUtils.hpp @@ -28,20 +28,24 @@ #include "cds/cds_globals.hpp" #include "cds/serializeClosure.hpp" #include "logging/log.hpp" +#include "memory/allocation.hpp" #include "memory/metaspace.hpp" +#include "memory/metaspaceClosureType.hpp" #include "memory/virtualspace.hpp" #include "runtime/nonJavaThread.hpp" #include 
"runtime/semaphore.hpp" #include "utilities/bitMap.hpp" #include "utilities/exceptions.hpp" +#include "utilities/growableArray.hpp" +#include "utilities/hashTable.hpp" #include "utilities/macros.hpp" class BootstrapInfo; +class DumpAllocStats; class ReservedSpace; class VirtualSpace; template class Array; -template class GrowableArray; // ArchivePtrMarker is used to mark the location of pointers embedded in a CDS archive. E.g., when an // InstanceKlass k is dumped, we mark the location of the k->_name pointer by effectively calling @@ -159,6 +163,18 @@ private: void commit_to(char* newtop); +public: + // Allocation gaps (due to Klass alignment) + class AllocGapTree; + class AllocGap; + struct AllocGapCmp; + +private: + static AllocGapTree _gap_tree; + static size_t _total_gap_bytes; + static size_t _total_gap_bytes_used; + static size_t _total_gap_allocs; + public: DumpRegion(const char* name) : _name(name), _base(nullptr), _top(nullptr), _end(nullptr), @@ -167,6 +183,7 @@ public: char* expand_top_to(char* newtop); char* allocate(size_t num_bytes, size_t alignment = 0); + char* allocate_metaspace_obj(size_t num_bytes, address src, MetaspaceClosureType type, bool read_only, DumpAllocStats* stats); void append_intptr_t(intptr_t n, bool need_to_mark = false) NOT_CDS_RETURN; @@ -191,6 +208,8 @@ public: bool contains(char* p) { return base() <= p && p < top(); } + + static void report_gaps(DumpAllocStats* stats); }; // Closure for serializing initialization data out to a data area to be @@ -383,4 +402,39 @@ public: void run_task(ArchiveWorkerTask* task); }; +// A utility class for writing an array of unique items into the +// AOT cache. For determinism, the order of the array is the same +// as calls to add(). 
I.e., if items are added in the order +// of A, B, A, C, B, D, then the array will be written as {A, B, C, D} +template +class ArchivableTable : public AnyObj { + using Table = HashTable; + Table* _seen_items; + GrowableArray* _ordered_array; +public: + ArchivableTable() { + _seen_items = new (mtClassShared)Table(); + _ordered_array = new (mtClassShared)GrowableArray(128, mtClassShared); + } + + ~ArchivableTable() { + delete _seen_items; + delete _ordered_array; + } + + void add(T t) { + bool created; + _seen_items->put_if_absent(t, &created); + if (created) { + _ordered_array->append(t); + } + } + + Array* write_ordered_array() { + return ArchiveUtils::archive_array(_ordered_array); + } +}; + +using ArchivableKlassTable = ArchivableTable; + #endif // SHARE_CDS_ARCHIVEUTILS_HPP diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp index f4ef3c66f7a..ecf3c6d2231 100644 --- a/src/hotspot/share/cds/cdsConfig.cpp +++ b/src/hotspot/share/cds/cdsConfig.cpp @@ -108,6 +108,8 @@ void CDSConfig::ergo_initialize() { } AOTMapLogger::ergo_initialize(); + + setup_compiler_args(); } const char* CDSConfig::default_archive_path() { @@ -635,8 +637,6 @@ bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_fla FLAG_SET_ERGO_IF_DEFAULT(AOTClassLinking, true); } - setup_compiler_args(); - if (AOTClassLinking) { // If AOTClassLinking is specified, enable all AOT optimizations by default. 
FLAG_SET_ERGO_IF_DEFAULT(AOTInvokeDynamicLinking, true); @@ -891,10 +891,6 @@ const char* CDSConfig::type_of_archive_being_written() { // If an incompatible VM options is found, return a text message that explains why static const char* check_options_incompatible_with_dumping_heap() { #if INCLUDE_CDS_JAVA_HEAP - if (!UseCompressedClassPointers) { - return "UseCompressedClassPointers must be true"; - } - return nullptr; #else return "JVM not configured for writing Java heap objects"; @@ -972,17 +968,27 @@ bool CDSConfig::is_loading_heap() { } bool CDSConfig::is_dumping_klass_subgraphs() { - if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) { + if (is_dumping_aot_linked_classes()) { // KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It // has been superceded by AOT class linking. This feature is used only when // AOT class linking is disabled. - // - // KlassSubGraphs are disabled in the preimage static archive, which contains a very - // limited set of oops. - return is_dumping_heap() && !is_dumping_aot_linked_classes(); - } else { return false; } + + if (is_dumping_preimage_static_archive()) { // KlassSubGraphs are disabled in the preimage static archive, which contains a very + // limited set of oops. + return false; + } + + if (!is_dumping_full_module_graph()) { // KlassSubGraphs cannot be partially disabled. Since some of the KlassSubGraphs + // are used for legacy support of the archived full module graph, if + // is_dumping_full_module_graph() is false, we must disable all KlassSubGraphs.
+ return false; + } + + return is_dumping_heap(); } bool CDSConfig::is_using_klass_subgraphs() { diff --git a/src/hotspot/share/cds/cdsHeapVerifier.hpp b/src/hotspot/share/cds/cdsHeapVerifier.hpp index 7f1bdb1d249..f8e090801bb 100644 --- a/src/hotspot/share/cds/cdsHeapVerifier.hpp +++ b/src/hotspot/share/cds/cdsHeapVerifier.hpp @@ -53,7 +53,7 @@ class CDSHeapVerifier : public KlassClosure { 15889, // prime number AnyObj::C_HEAP, mtClassShared, - HeapShared::oop_hash> _table; + HeapShared::oop_address_hash> _table; GrowableArray _exclusions; GrowableArray _shared_secret_accessors; diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp index dc5a777d7b1..57da12dee48 100644 --- a/src/hotspot/share/cds/cppVtables.cpp +++ b/src/hotspot/share/cds/cppVtables.cpp @@ -22,7 +22,6 @@ * */ -#include "cds/aotGrowableArray.hpp" #include "cds/aotMetaspace.hpp" #include "cds/archiveBuilder.hpp" #include "cds/archiveUtils.hpp" @@ -41,6 +40,7 @@ #include "oops/typeArrayKlass.hpp" #include "runtime/arguments.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/growableArray.hpp" // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables. // (In GCC this is the field ::_vptr, i.e., first word in the object.) @@ -58,10 +58,10 @@ #ifndef PRODUCT -// AOTGrowableArray has a vtable only when in non-product builds (due to +// GrowableArray has a vtable only when in non-product builds (due to // the virtual printing functions in AnyObj). 
-using GrowableArray_ModuleEntry_ptr = AOTGrowableArray; +using GrowableArray_ModuleEntry_ptr = GrowableArray; #define DEBUG_CPP_VTABLE_TYPES_DO(f) \ f(GrowableArray_ModuleEntry_ptr) \ diff --git a/src/hotspot/share/cds/dumpAllocStats.cpp b/src/hotspot/share/cds/dumpAllocStats.cpp index 5f324566103..ddd4bac6086 100644 --- a/src/hotspot/share/cds/dumpAllocStats.cpp +++ b/src/hotspot/share/cds/dumpAllocStats.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -129,15 +129,3 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) { _bytes [RW][MethodTrainingDataType]); } - -#ifdef ASSERT -void DumpAllocStats::verify(int expected_byte_size, bool read_only) const { - int bytes = 0; - const int what = (int)(read_only ? RO : RW); - for (int type = 0; type < int(_number_of_types); type ++) { - bytes += _bytes[what][type]; - } - assert(bytes == expected_byte_size, "counter mismatch (%s: %d vs %d)", - (read_only ? "RO" : "RW"), bytes, expected_byte_size); -} -#endif // ASSERT diff --git a/src/hotspot/share/cds/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp index 4553f0f6a01..4daef9195a6 100644 --- a/src/hotspot/share/cds/dumpAllocStats.hpp +++ b/src/hotspot/share/cds/dumpAllocStats.hpp @@ -41,6 +41,7 @@ public: f(StringHashentry) \ f(StringBucket) \ f(CppVTables) \ + f(Gap) \ f(Other) #define DUMPED_TYPE_DECLARE(name) name ## Type, @@ -111,12 +112,19 @@ public: _bytes [which][t] += byte_size; } + void record_gap(int byte_size) { + _counts[RW][GapType] += 1; + _bytes [RW][GapType] += byte_size; + } + void record_other_type(int byte_size, bool read_only) { int which = (read_only) ? 
RO : RW; + _counts[which][OtherType] += 1; _bytes [which][OtherType] += byte_size; } void record_cpp_vtables(int byte_size) { + _counts[RW][CppVTablesType] += 1; _bytes[RW][CppVTablesType] += byte_size; } @@ -145,9 +153,6 @@ public: } void print_stats(int ro_all, int rw_all); - - DEBUG_ONLY(void verify(int expected_byte_size, bool read_only) const); - }; #endif // SHARE_CDS_DUMPALLOCSTATS_HPP diff --git a/src/hotspot/share/cds/dumpTimeClassInfo.hpp b/src/hotspot/share/cds/dumpTimeClassInfo.hpp index c2f83b22337..d2de8148bea 100644 --- a/src/hotspot/share/cds/dumpTimeClassInfo.hpp +++ b/src/hotspot/share/cds/dumpTimeClassInfo.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,6 @@ class Symbol; class DumpTimeClassInfo: public CHeapObj { bool _excluded; bool _is_aot_tooling_class; - bool _is_early_klass; bool _has_checked_exclusion; class DTLoaderConstraint { @@ -143,7 +142,6 @@ public: _clsfile_crc32 = -1; _excluded = false; _is_aot_tooling_class = false; - _is_early_klass = JvmtiExport::is_early_phase(); _verifier_constraints = nullptr; _verifier_constraint_flags = nullptr; _loader_constraints = nullptr; @@ -219,11 +217,6 @@ public: _is_aot_tooling_class = true; } - // Was this class loaded while JvmtiExport::is_early_phase()==true - bool is_early_klass() { - return _is_early_klass; - } - // simple accessors void set_excluded() { _excluded = true; } bool has_checked_exclusion() const { return _has_checked_exclusion; } diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp index 91fbce701e5..38502b2b2d8 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -225,15 +225,9 @@ void FileMapHeader::populate(FileMapInfo *info, 
size_t core_region_alignment, } #endif _compressed_oops = UseCompressedOops; - _compressed_class_ptrs = UseCompressedClassPointers; - if (UseCompressedClassPointers) { -#ifdef _LP64 - _narrow_klass_pointer_bits = CompressedKlassPointers::narrow_klass_pointer_bits(); - _narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift(); -#endif - } else { - _narrow_klass_pointer_bits = _narrow_klass_shift = -1; - } + _narrow_klass_pointer_bits = CompressedKlassPointers::narrow_klass_pointer_bits(); + _narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift(); + // Which JIT compiler is used _compiler_type = (u1)CompilerConfig::compiler_type(); _type_profile_level = TypeProfileLevel; @@ -295,7 +289,6 @@ void FileMapHeader::print(outputStream* st) { st->print_cr("- max_heap_size: %zu", _max_heap_size); st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode); st->print_cr("- compressed_oops: %d", _compressed_oops); - st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs); st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits); st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift); st->print_cr("- cloned_vtables: %u", cast_to_u4(_cloned_vtables)); @@ -1535,10 +1528,34 @@ bool FileMapInfo::can_use_heap_region() { if (!has_heap_region()) { return false; } - if (!object_streaming_mode() && !Universe::heap()->can_load_archived_objects() && !UseG1GC) { - // Incompatible object format + + if (!object_streaming_mode() && !AOTMappedHeapLoader::can_use()) { + // Currently this happens only when using ZGC with an AOT cache generated with -XX:-AOTStreamableObjects + AOTMetaspace::report_loading_error("CDS heap data cannot be used by the selected GC. 
" + "Please choose a different GC or rebuild AOT cache " + "with -XX:+AOTStreamableObjects"); return false; } + + if (CDSConfig::is_using_aot_linked_classes()) { + assert(!JvmtiExport::should_post_class_file_load_hook(), "already checked"); + assert(CDSConfig::is_using_full_module_graph(), "already checked"); + } else { + if (JvmtiExport::should_post_class_file_load_hook()) { + AOTMetaspace::report_loading_error("CDS heap data is disabled because JVMTI ClassFileLoadHook is in use."); + return false; + } + if (!CDSConfig::is_using_full_module_graph()) { + if (CDSConfig::is_dumping_final_static_archive()) { + // We are loading the preimage static archive, which has no KlassSubGraphs. + // See CDSConfig::is_dumping_klass_subgraphs() + } else { + AOTMetaspace::report_loading_error("CDS heap data is disabled because archived full module graph is not used."); + return false; + } + } + } + if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) { ShouldNotReachHere(); // CDS should have been disabled. 
// The archived objects are mapped at JVM start-up, but we don't know if @@ -1902,11 +1919,12 @@ bool FileMapHeader::validate() { _has_platform_or_app_classes = false; } - aot_log_info(aot)("The %s was created with UseCompressedOops = %d, UseCompressedClassPointers = %d, UseCompactObjectHeaders = %d", - file_type, compressed_oops(), compressed_class_pointers(), compact_headers()); - if (compressed_oops() != UseCompressedOops || compressed_class_pointers() != UseCompressedClassPointers) { - aot_log_warning(aot)("Unable to use %s.\nThe saved state of UseCompressedOops and UseCompressedClassPointers is " - "different from runtime, CDS will be disabled.", file_type); + aot_log_info(aot)("The %s was created with UseCompressedOops = %d, UseCompactObjectHeaders = %d", + file_type, compressed_oops(), compact_headers()); + if (compressed_oops() != UseCompressedOops) { + aot_log_warning(aot)("Unable to use %s.\nThe saved state of UseCompressedOops (%d) is " + "different from runtime (%d), CDS will be disabled.", file_type, + compressed_oops(), UseCompressedOops); return false; } diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp index 56b88df378a..bae08bd5bc7 100644 --- a/src/hotspot/share/cds/filemap.hpp +++ b/src/hotspot/share/cds/filemap.hpp @@ -120,7 +120,6 @@ private: CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode bool _object_streaming_mode; // dump was created for object streaming bool _compressed_oops; // save the flag UseCompressedOops - bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers int _narrow_klass_pointer_bits; // save number of bits in narrowKlass int _narrow_klass_shift; // save shift width used to pre-compute narrowKlass IDs in archived heap objects narrowPtr _cloned_vtables; // The address of the first cloned vtable @@ -200,7 +199,6 @@ public: bool has_platform_or_app_classes() const { return _has_platform_or_app_classes; } bool has_aot_linked_classes() const { return 
_has_aot_linked_classes; } bool compressed_oops() const { return _compressed_oops; } - bool compressed_class_pointers() const { return _compressed_class_ptrs; } int narrow_klass_pointer_bits() const { return _narrow_klass_pointer_bits; } int narrow_klass_shift() const { return _narrow_klass_shift; } bool has_full_module_graph() const { return _has_full_module_graph; } diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp index 0c0f70eac0a..d75816656b0 100644 --- a/src/hotspot/share/cds/heapShared.cpp +++ b/src/hotspot/share/cds/heapShared.cpp @@ -112,6 +112,11 @@ static Klass* _test_class = nullptr; static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr; #endif +#ifdef ASSERT +// All classes that have at least one instance in the cached heap. +static ArchivableKlassTable* _dumptime_classes_with_cached_oops = nullptr; +static Array* _runtime_classes_with_cached_oops = nullptr; +#endif // // If you add new entries to the following tables, you should know what you're doing! 
@@ -131,17 +136,14 @@ static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = { {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"}, {"jdk/internal/math/FDBigInteger", "archivedCaches"}, -#ifndef PRODUCT - {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass -#endif - {nullptr, nullptr}, -}; - -// full module graph -static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = { + // full module graph support {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"}, {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD}, {"java/lang/Module$ArchivedData", "archivedData"}, + +#ifndef PRODUCT + {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass +#endif {nullptr, nullptr}, }; @@ -164,8 +166,7 @@ bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) { assert(CDSConfig::is_dumping_heap(), "dump-time only"); if (CDSConfig::is_dumping_klass_subgraphs()) { // Legacy CDS archive support (to be deprecated) - return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) || - is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik); + return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik); } else { return false; } @@ -175,23 +176,39 @@ oop HeapShared::CachedOopInfo::orig_referrer() const { return _orig_referrer.resolve(); } -unsigned HeapShared::oop_hash(oop const& p) { +// This is a simple hashing of the oop's address. This function is used +// while copying the oops into the AOT heap region. We don't want to +// have any side effects during the copying, so we avoid calling +// p->identity_hash() which can update the object header. +unsigned HeapShared::oop_address_hash(oop const& p) { assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current()->is_in_no_safepoint_scope(), "sanity"); - // Do not call p->identity_hash() as that will update the - // object header. 
return primitive_hash(cast_from_oop(p)); } -unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) { - return oop_hash(oh.resolve()); -} - -unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) { +// About the hashcode in the cached objects: +// - If a source object has a hashcode, it must be copied into the cache. +// That's because some cached hashtables are laid out using this hashcode. +// - If a source object doesn't have a hashcode, we avoid computing it while +// copying the objects into the cache. This will allow the hashcode to be +// dynamically and randomly computed in each production, which generally +// desirable to make the hashcodes more random between runs. +unsigned HeapShared::archived_object_cache_hash(OopHandle const& oh) { oop o = oh.resolve(); if (o == nullptr) { return 0; + } + if (!_use_identity_hash_for_archived_object_cache) { + // This is called while we are copying the objects. Don't call o->identity_hash() + // as that will update the object header. + return oop_address_hash(o); } else { + // This is called after all objects are copied. It's OK to update + // the object's hashcode. + // + // This may be called after we have left the AOT dumping safepoint. + // Objects in archived_object_cache() may be moved by the GC, so we + // can't use the address of o for computing the hash. return o->identity_hash(); } } @@ -271,6 +288,12 @@ void HeapShared::prepare_for_archiving(TRAPS) { HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr; +// Controls the hashing method for the _archived_object_cache. +// Changes from false to true once, after all objects are copied, +// inside make_archived_object_cache_gc_safe(). +// See archived_object_cache_hash() for more details. 
+bool HeapShared::_use_identity_hash_for_archived_object_cache = false; + bool HeapShared::is_archived_heap_in_use() { if (HeapShared::is_loading()) { if (HeapShared::is_loading_streaming_mode()) { @@ -373,6 +396,21 @@ void HeapShared::initialize_streaming() { } void HeapShared::enable_gc() { +#ifdef ASSERT + // At this point, a GC may start and will be able to see some or all + // of the cached oops. The class of each oop seen by the GC must have + // already been loaded. One function with such a requirement is + // ClaimMetadataVisitingOopIterateClosure::do_klass(). + if (is_archived_heap_in_use()) { + Array* klasses = _runtime_classes_with_cached_oops; + + for (int i = 0; i < klasses->length(); i++) { + assert(klasses->at(i)->class_loader_data() != nullptr, + "class of cached oop must have been loaded"); + } + } +#endif + if (AOTStreamedHeapLoader::is_in_use()) { AOTStreamedHeapLoader::enable_gc(); } @@ -384,9 +422,8 @@ void HeapShared::materialize_thread_object() { } } -void HeapShared::add_to_dumped_interned_strings(oop string) { +void HeapShared::archive_interned_string(oop string) { assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode"); - AOTMappedHeapWriter::add_to_dumped_interned_strings(string); bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string); assert(success, "shared strings array must not point to arrays or strings that are too large to archive"); } @@ -404,6 +441,24 @@ void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) { } } +void HeapShared::make_archived_object_cache_gc_safe() { + ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE); + + // It's safe to change the behavior of the hash function now, because iterate_all() + // doesn't call the hash function. + // See archived_object_cache_hash() for more details. 
+ assert(_use_identity_hash_for_archived_object_cache == false, "happens only once"); + _use_identity_hash_for_archived_object_cache = true; + + // Copy all CachedOopInfo into a new table using a different hashing algorithm + archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) { + new_cache->put_when_absent(oh, info); + }); + + destroy_archived_object_cache(); + _archived_object_cache = new_cache; +} + HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) { OopHandle oh(Universe::vm_global(), obj); CachedOopInfo* result = _archived_object_cache->get(oh); @@ -417,14 +472,53 @@ bool HeapShared::has_been_archived(oop obj) { } int HeapShared::append_root(oop obj) { + assert(SafepointSynchronize::is_at_safepoint(), "sanity"); assert(CDSConfig::is_dumping_heap(), "dump-time only"); - if (obj != nullptr) { - assert(has_been_archived(obj), "must be"); - } - // No GC should happen since we aren't scanning _pending_roots. - assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); + assert(_pending_roots != nullptr, "sanity"); - return _pending_roots->append(obj); + if (obj == nullptr) { + assert(_pending_roots->at(0) == nullptr, "root index 0 always maps to null"); + return 0; + } else if (CDSConfig::is_dumping_aot_linked_classes()) { + // The AOT compiler may refer the same obj many times, so we + // should use the same index for this oop to avoid excessive entries + // in the roots array. 
+ CachedOopInfo* obj_info = get_cached_oop_info(obj); + assert(obj_info != nullptr, "must be archived"); + + if (obj_info->root_index() > 0) { + return obj_info->root_index(); + } else { + assert(obj_info->root_index() < 0, "must not be zero"); + int i = _pending_roots->append(obj); + obj_info->set_root_index(i); + return i; + } + } else { + return _pending_roots->append(obj); + } +} + +int HeapShared::get_root_index(oop obj) { + if (java_lang_Class::is_instance(obj)) { + obj = scratch_java_mirror(obj); + } + + CachedOopInfo* obj_info = get_cached_oop_info(obj); + const char* error = nullptr; + if (obj_info == nullptr) { + error = "Not a cached oop"; + } else if (obj_info->root_index() < 0) { + error = "Not a cached oop root"; + } else { + return obj_info->root_index(); + } + + ResourceMark rm; + log_debug(aot, codecache, oops)("%s: " INTPTR_FORMAT " (%s)", error, + cast_from_oop(obj), + obj->klass()->external_name()); + return -1; } oop HeapShared::get_root(int index, bool clear) { @@ -453,6 +547,13 @@ void HeapShared::finish_materialize_objects() { } void HeapShared::clear_root(int index) { + if (CDSConfig::is_using_aot_linked_classes()) { + // When AOT linked classes are in use, all roots will be in use all + // the time, there's no benefit for clearing the roots. Also, we + // can't clear the roots as they can be shared. + return; + } + assert(index >= 0, "sanity"); assert(CDSConfig::is_using_archive(), "must be"); if (is_archived_heap_in_use()) { @@ -483,8 +584,10 @@ bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgra return false; } + AOTArtifactFinder::add_cached_class(obj->klass()); AOTOopChecker::check(obj); // Make sure contents of this oop are safe. 
count_allocation(obj->size()); + DEBUG_ONLY(_dumptime_classes_with_cached_oops->add(obj->klass())); if (HeapShared::is_writing_streaming_mode()) { AOTStreamedHeapWriter::add_source_obj(obj); @@ -586,11 +689,6 @@ public: }; void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) { - if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) { - // We are in AOT training run. The class has been redefined and we are giving it a new resolved_reference. - // Ignore it, as this class will be excluded from the AOT config. - return; - } if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) { _scratch_objects_table->set_oop(src, dest); } @@ -600,9 +698,17 @@ objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) { return (objArrayOop)_scratch_objects_table->get_oop(src); } - void HeapShared::init_dumping() { - _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable(); - _pending_roots = new GrowableArrayCHeap(500); +void HeapShared::remove_scratch_resolved_references(ConstantPool* src) { + if (CDSConfig::is_dumping_heap()) { + _scratch_objects_table->remove_oop(src); + } +} + +void HeapShared::init_dumping() { + _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable(); + _pending_roots = new GrowableArrayCHeap(500); + _pending_roots->append(nullptr); // root index 0 represents a null oop + DEBUG_ONLY(_dumptime_classes_with_cached_oops = new (mtClassShared)ArchivableKlassTable()); } void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) { @@ -839,7 +945,7 @@ void HeapShared::start_scanning_for_oops() { // The special subgraph doesn't belong to any class. We use Object_klass() here just // for convenience. 
- _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false); + _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass()); // Cache for recording where the archived objects are copied to create_archived_object_cache(); @@ -883,6 +989,13 @@ void HeapShared::write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeap ArchiveBuilder::OtherROAllocMark mark; write_subgraph_info_table(); + + DEBUG_ONLY(_runtime_classes_with_cached_oops = _dumptime_classes_with_cached_oops->write_ordered_array()); + + delete _pending_roots; + _pending_roots = nullptr; + + make_archived_object_cache_gc_safe(); } void HeapShared::scan_java_mirror(oop orig_mirror) { @@ -912,12 +1025,7 @@ void HeapShared::archive_subgraphs() { assert(CDSConfig::is_dumping_heap(), "must be"); if (CDSConfig::is_dumping_klass_subgraphs()) { - archive_object_subgraphs(archive_subgraph_entry_fields, - false /* is_full_module_graph */); - if (CDSConfig::is_dumping_full_module_graph()) { - archive_object_subgraphs(fmg_archive_subgraph_entry_fields, - true /* is_full_module_graph */); - } + archive_object_subgraphs(archive_subgraph_entry_fields); } } @@ -930,12 +1038,11 @@ HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_ // Get the subgraph_info for Klass k. A new subgraph_info is created if // there is no existing one for k. The subgraph_info records the "buffered" // address of the class. 
-KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) { +KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k) { assert(CDSConfig::is_dumping_heap(), "dump time only"); bool created; KlassSubGraphInfo* info = - _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(k, is_full_module_graph), - &created); + _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(k), &created); assert(created, "must not initialize twice"); return info; } @@ -1023,7 +1130,6 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) { } _subgraph_object_klasses->append_if_missing(orig_k); - _has_non_early_klasses |= is_non_early_klass(orig_k); } void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) { @@ -1066,45 +1172,11 @@ void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) { AOTMetaspace::unrecoverable_writing_error(); } -bool KlassSubGraphInfo::is_non_early_klass(Klass* k) { - if (k->is_objArray_klass()) { - k = ObjArrayKlass::cast(k)->bottom_klass(); - } - if (k->is_instance_klass()) { - if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) { - ResourceMark rm; - log_info(aot, heap)("non-early: %s", k->external_name()); - return true; - } else { - return false; - } - } else { - return false; - } -} - // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo. 
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { _k = ArchiveBuilder::get_buffered_klass(info->klass()); _entry_field_records = nullptr; _subgraph_object_klasses = nullptr; - _is_full_module_graph = info->is_full_module_graph(); - - if (_is_full_module_graph) { - // Consider all classes referenced by the full module graph as early -- we will be - // allocating objects of these classes during JVMTI early phase, so they cannot - // be processed by (non-early) JVMTI ClassFileLoadHook - _has_non_early_klasses = false; - } else { - _has_non_early_klasses = info->has_non_early_klasses(); - } - - if (_has_non_early_klasses) { - ResourceMark rm; - log_info(aot, heap)( - "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled", - _k->external_name()); - } // populate the entry fields GrowableArray* entry_fields = info->subgraph_entry_fields(); @@ -1231,6 +1303,7 @@ void HeapShared::serialize_tables(SerializeClosure* soc) { _run_time_subgraph_info_table.serialize_header(soc); soc->do_ptr(&_run_time_special_subgraph); + DEBUG_ONLY(soc->do_ptr(&_runtime_classes_with_cached_oops)); } static void verify_the_heap(Klass* k, const char* which) { @@ -1262,15 +1335,10 @@ static void verify_the_heap(Klass* k, const char* which) { // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots() // have a valid klass. I.e., oopDesc::klass() must have already been resolved. -// -// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI -// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In -// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots. 
void HeapShared::resolve_classes(JavaThread* current) { assert(CDSConfig::is_using_archive(), "runtime only!"); if (CDSConfig::is_using_klass_subgraphs()) { resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields); - resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields); } } @@ -1418,24 +1486,6 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP } return nullptr; } else { - if (record->is_full_module_graph() && !CDSConfig::is_using_full_module_graph()) { - if (log_is_enabled(Info, aot, heap)) { - ResourceMark rm(THREAD); - log_info(aot, heap)("subgraph %s cannot be used because full module graph is disabled", - k->external_name()); - } - return nullptr; - } - - if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) { - if (log_is_enabled(Info, aot, heap)) { - ResourceMark rm(THREAD); - log_info(aot, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled", - k->external_name()); - } - return nullptr; - } - if (log_is_enabled(Info, aot, heap)) { ResourceMark rm; log_info(aot, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name()); @@ -1517,8 +1567,8 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI // mirror after this point. if (log_is_enabled(Info, aot, heap)) { ResourceMark rm; - log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s", - k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "", + log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s", + k->external_name(), p2i(k), k->has_aot_initialized_mirror() ? 
" (aot-inited)" : ""); } } @@ -1911,6 +1961,11 @@ void HeapShared::verify_subgraph_from(oop orig_obj) { void HeapShared::verify_reachable_objects_from(oop obj) { _num_total_verifications ++; if (java_lang_Class::is_instance(obj)) { + Klass* k = java_lang_Class::as_Klass(obj); + if (RegeneratedClasses::has_been_regenerated(k)) { + k = RegeneratedClasses::get_regenerated_object(k); + obj = k->java_mirror(); + } obj = scratch_java_mirror(obj); assert(obj != nullptr, "must be"); } @@ -1979,9 +2034,9 @@ void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) { ++ _num_new_walked_objs; } -void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) { +void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) { log_info(aot, heap)("Start recording subgraph(s) for archived fields in %s", class_name); - init_subgraph_info(k, is_full_module_graph); + init_subgraph_info(k); init_seen_objects_table(); _num_new_walked_objs = 0; _num_new_archived_objs = 0; @@ -2113,9 +2168,6 @@ void HeapShared::init_subgraph_entry_fields(TRAPS) { _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable(); if (CDSConfig::is_dumping_klass_subgraphs()) { init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK); - if (CDSConfig::is_dumping_full_module_graph()) { - init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK); - } } } @@ -2214,8 +2266,7 @@ void HeapShared::init_heap_writer() { } } -void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[], - bool is_full_module_graph) { +void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[]) { _num_total_subgraph_recordings = 0; _num_total_walked_objs = 0; _num_total_archived_objs = 0; @@ -2231,7 +2282,7 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[], for (int i = 0; fields[i].valid(); ) { ArchivableStaticFieldInfo* info = &fields[i]; const char* 
klass_name = info->klass_name; - start_recording_subgraph(info->klass, klass_name, is_full_module_graph); + start_recording_subgraph(info->klass, klass_name); // If you have specified consecutive fields of the same klass in // fields[], these will be archived in the same @@ -2264,12 +2315,22 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[], #endif } -bool HeapShared::is_dumped_interned_string(oop o) { - if (is_writing_mapping_mode()) { - return AOTMappedHeapWriter::is_dumped_interned_string(o); - } else { - return AOTStreamedHeapWriter::is_dumped_interned_string(o); +bool HeapShared::is_interned_string(oop obj) { + if (!java_lang_String::is_instance(obj)) { + return false; } + + ResourceMark rm; + int len = 0; + jchar* name = java_lang_String::as_unicode_string_or_null(obj, len); + if (name == nullptr) { + fatal("Insufficient memory for dumping"); + } + return StringTable::lookup(name, len) == obj; +} + +bool HeapShared::is_dumped_interned_string(oop o) { + return is_interned_string(o) && has_been_archived(o); } // These tables should be used only within the CDS safepoint, so diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp index 2cb330160e4..10ea35ab56e 100644 --- a/src/hotspot/share/cds/heapShared.hpp +++ b/src/hotspot/share/cds/heapShared.hpp @@ -40,7 +40,6 @@ #include "utilities/hashTable.hpp" #if INCLUDE_CDS_JAVA_HEAP -class DumpedInternedStrings; class FileMapInfo; class KlassSubGraphInfo; class MetaspaceObjToOopHandleTable; @@ -67,21 +66,12 @@ class KlassSubGraphInfo: public CHeapObj { // For each entry field, it is a tuple of field_offset, field_value GrowableArray* _subgraph_entry_fields; - // Does this KlassSubGraphInfo belong to the archived full module graph - bool _is_full_module_graph; - - // Does this KlassSubGraphInfo references any classes that were loaded while - // JvmtiExport::is_early_phase()!=true. 
If so, this KlassSubGraphInfo cannot be - // used at runtime if JVMTI ClassFileLoadHook is enabled. - bool _has_non_early_klasses; static bool is_non_early_klass(Klass* k); static void check_allowed_klass(InstanceKlass* ik); public: - KlassSubGraphInfo(Klass* k, bool is_full_module_graph) : + KlassSubGraphInfo(Klass* k) : _k(k), _subgraph_object_klasses(nullptr), - _subgraph_entry_fields(nullptr), - _is_full_module_graph(is_full_module_graph), - _has_non_early_klasses(false) {} + _subgraph_entry_fields(nullptr) {} ~KlassSubGraphInfo() { if (_subgraph_object_klasses != nullptr) { @@ -105,8 +95,6 @@ class KlassSubGraphInfo: public CHeapObj { return _subgraph_object_klasses == nullptr ? 0 : _subgraph_object_klasses->length(); } - bool is_full_module_graph() const { return _is_full_module_graph; } - bool has_non_early_klasses() const { return _has_non_early_klasses; } }; // An archived record of object sub-graphs reachable from static @@ -115,7 +103,6 @@ class KlassSubGraphInfo: public CHeapObj { class ArchivedKlassSubGraphInfoRecord { private: Klass* _k; - bool _is_full_module_graph; bool _has_non_early_klasses; // contains pairs of field offset and value for each subgraph entry field @@ -131,7 +118,6 @@ class ArchivedKlassSubGraphInfoRecord { Klass* klass() const { return _k; } Array* entry_field_records() const { return _entry_field_records; } Array* subgraph_object_klasses() const { return _subgraph_object_klasses; } - bool is_full_module_graph() const { return _is_full_module_graph; } bool has_non_early_klasses() const { return _has_non_early_klasses; } }; #endif // INCLUDE_CDS_JAVA_HEAP @@ -176,7 +162,7 @@ public: static void initialize_streaming() NOT_CDS_JAVA_HEAP_RETURN; static void enable_gc() NOT_CDS_JAVA_HEAP_RETURN; static void materialize_thread_object() NOT_CDS_JAVA_HEAP_RETURN; - static void add_to_dumped_interned_strings(oop string) NOT_CDS_JAVA_HEAP_RETURN; + static void archive_interned_string(oop string); static void 
finalize_initialization(FileMapInfo* static_mapinfo) NOT_CDS_JAVA_HEAP_RETURN; private: @@ -195,13 +181,8 @@ private: static void print_stats(); public: static void debug_trace(); - static unsigned oop_hash(oop const& p); - static unsigned oop_handle_hash(OopHandle const& oh); - static unsigned oop_handle_hash_raw(OopHandle const& oh); + static unsigned oop_address_hash(oop const& p); static bool oop_handle_equals(const OopHandle& a, const OopHandle& b); - static unsigned string_oop_hash(oop const& string) { - return java_lang_String::hash_code(string); - } class CopyKlassSubGraphInfoToArchive; @@ -217,27 +198,37 @@ public: // One or more fields in this object are pointing to MetaspaceObj bool _has_native_pointers; + + // >= 0 if this oop has been append to the list of roots + int _root_index; public: CachedOopInfo(OopHandle orig_referrer, bool has_oop_pointers) : _orig_referrer(orig_referrer), _buffer_offset(0), _has_oop_pointers(has_oop_pointers), - _has_native_pointers(false) {} + _has_native_pointers(false), + _root_index(-1) {} oop orig_referrer() const; void set_buffer_offset(size_t offset) { _buffer_offset = offset; } size_t buffer_offset() const { return _buffer_offset; } bool has_oop_pointers() const { return _has_oop_pointers; } bool has_native_pointers() const { return _has_native_pointers; } void set_has_native_pointers() { _has_native_pointers = true; } + int root_index() const { return _root_index; } + void set_root_index(int i) { _root_index = i; } }; private: static const int INITIAL_TABLE_SIZE = 15889; // prime number static const int MAX_TABLE_SIZE = 1000000; + static bool _use_identity_hash_for_archived_object_cache; + + static unsigned archived_object_cache_hash(OopHandle const& oh); + typedef ResizeableHashTable ArchivedObjectCache; static ArchivedObjectCache* _archived_object_cache; @@ -266,8 +257,7 @@ private: static CachedOopInfo make_cached_oop_info(oop obj, oop referrer); static ArchivedKlassSubGraphInfoRecord* 
archive_subgraph_info(KlassSubGraphInfo* info); - static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[], - bool is_full_module_graph); + static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[]); // Archive object sub-graph starting from the given static field // in Klass k's mirror. @@ -281,7 +271,7 @@ private: static void verify_subgraph_from(oop orig_obj) PRODUCT_RETURN; static void check_special_subgraph_classes(); - static KlassSubGraphInfo* init_subgraph_info(Klass *k, bool is_full_module_graph); + static KlassSubGraphInfo* init_subgraph_info(Klass *k); static KlassSubGraphInfo* get_subgraph_info(Klass *k); static void init_subgraph_entry_fields(TRAPS) NOT_CDS_JAVA_HEAP_RETURN; @@ -297,7 +287,7 @@ private: typedef ResizeableHashTable SeenObjectsTable; + HeapShared::oop_address_hash> SeenObjectsTable; static SeenObjectsTable *_seen_objects_table; @@ -336,8 +326,7 @@ private: static size_t _num_total_recorded_klasses; static size_t _num_total_verifications; - static void start_recording_subgraph(InstanceKlass *k, const char* klass_name, - bool is_full_module_graph); + static void start_recording_subgraph(InstanceKlass *k, const char* klass_name); static void done_recording_subgraph(InstanceKlass *k, const char* klass_name); static bool has_been_seen_during_subgraph_recording(oop obj); @@ -394,6 +383,7 @@ private: delete _archived_object_cache; _archived_object_cache = nullptr; } + static void make_archived_object_cache_gc_safe(); static ArchivedObjectCache* archived_object_cache() { return _archived_object_cache; } @@ -406,6 +396,7 @@ private: KlassSubGraphInfo* subgraph_info, oop orig_obj); + static bool is_interned_string(oop obj); static bool is_dumped_interned_string(oop o); // Scratch objects for archiving Klass::java_mirror() @@ -437,6 +428,11 @@ private: // Dump-time only. Returns the index of the root, which can be used at run time to read // the root using get_root(index, ...). 
static int append_root(oop obj); + + // AOT-compile time only. + // Returns -1 if obj is not in the heap root set. + static int get_root_index(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(-1); + static GrowableArrayCHeap* pending_roots() { return _pending_roots; } // Dump-time and runtime @@ -445,9 +441,7 @@ private: // Run-time only static void clear_root(int index); - static void get_segment_indexes(int index, int& segment_index, int& internal_index); - static void setup_test_class(const char* test_class_name) PRODUCT_RETURN; #endif // INCLUDE_CDS_JAVA_HEAP @@ -457,6 +451,7 @@ private: static void write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) NOT_CDS_JAVA_HEAP_RETURN; static objArrayOop scratch_resolved_references(ConstantPool* src); static void add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) NOT_CDS_JAVA_HEAP_RETURN; + static void remove_scratch_resolved_references(ConstantPool* src) NOT_CDS_JAVA_HEAP_RETURN; static void init_dumping() NOT_CDS_JAVA_HEAP_RETURN; static void init_scratch_objects_for_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN; static void init_box_classes(TRAPS) NOT_CDS_JAVA_HEAP_RETURN; diff --git a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp index dfb75532917..b20e998bba6 100644 --- a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp +++ b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP -#define SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP +#ifndef SHARE_CDS_LAMBDAPROXYCLASSDICTIONARY_HPP +#define SHARE_CDS_LAMBDAPROXYCLASSDICTIONARY_HPP #include "cds/aotCompressedPointers.hpp" #include "cds/aotMetaspace.hpp" @@ -331,4 +331,4 @@ public: static void print_statistics(outputStream* st, bool is_static_archive); }; -#endif // SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP +#endif // SHARE_CDS_LAMBDAPROXYCLASSDICTIONARY_HPP diff --git a/src/hotspot/share/ci/ciMethodData.cpp 
b/src/hotspot/share/ci/ciMethodData.cpp index 533e8659968..5e623e2b965 100644 --- a/src/hotspot/share/ci/ciMethodData.cpp +++ b/src/hotspot/share/ci/ciMethodData.cpp @@ -537,8 +537,8 @@ void ciMethodData::clear_escape_info() { if (mdo != nullptr) { mdo->clear_escape_info(); ArgInfoData *aid = arg_info(); - int arg_count = (aid == nullptr) ? 0 : aid->number_of_args(); - for (int i = 0; i < arg_count; i++) { + int arg_size = (aid == nullptr) ? 0 : aid->size_of_args(); + for (int i = 0; i < arg_size; i++) { set_arg_modified(i, 0); } } @@ -554,8 +554,8 @@ void ciMethodData::update_escape_info() { mdo->set_arg_local(_arg_local); mdo->set_arg_stack(_arg_stack); mdo->set_arg_returned(_arg_returned); - int arg_count = mdo->method()->size_of_parameters(); - for (int i = 0; i < arg_count; i++) { + int arg_size = mdo->method()->size_of_parameters(); + for (int i = 0; i < arg_size; i++) { mdo->set_arg_modified(i, arg_modified(i)); } } @@ -652,7 +652,7 @@ void ciMethodData::set_arg_modified(int arg, uint val) { ArgInfoData *aid = arg_info(); if (aid == nullptr) return; - assert(arg >= 0 && arg < aid->number_of_args(), "valid argument number"); + assert(arg >= 0 && arg < aid->size_of_args(), "valid argument number"); aid->set_arg_modified(arg, val); } @@ -672,7 +672,7 @@ uint ciMethodData::arg_modified(int arg) const { ArgInfoData *aid = arg_info(); if (aid == nullptr) return 0; - assert(arg >= 0 && arg < aid->number_of_args(), "valid argument number"); + assert(arg >= 0 && arg < aid->size_of_args(), "valid argument number"); return aid->arg_modified(arg); } diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index c1f00cbe536..a9ea6fbea11 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -34,6 +34,7 @@ #include "classfile/packageEntry.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" +#include 
"classfile/systemDictionaryShared.hpp" #include "classfile/verificationType.hpp" #include "classfile/verifier.hpp" #include "classfile/vmClasses.hpp" @@ -86,9 +87,6 @@ #include "utilities/macros.hpp" #include "utilities/ostream.hpp" #include "utilities/utf8.hpp" -#if INCLUDE_CDS -#include "classfile/systemDictionaryShared.hpp" -#endif // We generally try to create the oops directly when parsing, rather than // allocating temporary data structures and copying the bytes twice. A @@ -194,7 +192,7 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s // so we don't need bounds-check for reading tag. const u1 tag = cfs->get_u1_fast(); switch (tag) { - case JVM_CONSTANT_Class : { + case JVM_CONSTANT_Class: { cfs->guarantee_more(3, CHECK); // name_index, tag/access_flags const u2 name_index = cfs->get_u2_fast(); cp->klass_index_at_put(index, name_index); @@ -4403,14 +4401,14 @@ void ClassFileParser::verify_legal_field_modifiers(jint flags, TRAPS) const { if (!_need_verify) { return; } - const bool is_public = (flags & JVM_ACC_PUBLIC) != 0; - const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0; - const bool is_private = (flags & JVM_ACC_PRIVATE) != 0; - const bool is_static = (flags & JVM_ACC_STATIC) != 0; - const bool is_final = (flags & JVM_ACC_FINAL) != 0; - const bool is_volatile = (flags & JVM_ACC_VOLATILE) != 0; - const bool is_transient = (flags & JVM_ACC_TRANSIENT) != 0; - const bool is_enum = (flags & JVM_ACC_ENUM) != 0; + const bool is_public = (flags & JVM_ACC_PUBLIC) != 0; + const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0; + const bool is_private = (flags & JVM_ACC_PRIVATE) != 0; + const bool is_static = (flags & JVM_ACC_STATIC) != 0; + const bool is_final = (flags & JVM_ACC_FINAL) != 0; + const bool is_volatile = (flags & JVM_ACC_VOLATILE) != 0; + const bool is_transient = (flags & JVM_ACC_TRANSIENT) != 0; + const bool is_enum = (flags & JVM_ACC_ENUM) != 0; const bool major_gte_1_5 = _major_version >= 
JAVA_1_5_VERSION; bool is_illegal = false; @@ -5256,6 +5254,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, if (!is_internal()) { ik->print_class_load_logging(_loader_data, module_entry, _stream); + if (CDSConfig::is_dumping_archive()) { + SystemDictionaryShared::check_code_source(ik, _stream); + } if (ik->minor_version() == JAVA_PREVIEW_MINOR_VERSION && ik->major_version() == JVM_CLASSFILE_MAJOR_VERSION && diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp index dfc3b74db96..d1ea9c09d4c 100644 --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ // The bootstrap loader (represented by null) also has a ClassLoaderData, // the singleton class the_null_class_loader_data(). 
+#include "cds/heapShared.hpp" #include "classfile/classLoaderData.inline.hpp" #include "classfile/classLoaderDataGraph.inline.hpp" #include "classfile/dictionary.hpp" @@ -899,6 +900,7 @@ void ClassLoaderData::free_deallocate_list() { if (m->is_method()) { MetadataFactory::free_metadata(this, (Method*)m); } else if (m->is_constantPool()) { + HeapShared::remove_scratch_resolved_references((ConstantPool*)m); MetadataFactory::free_metadata(this, (ConstantPool*)m); } else if (m->is_klass()) { MetadataFactory::free_metadata(this, (InstanceKlass*)m); diff --git a/src/hotspot/share/classfile/classPrinter.cpp b/src/hotspot/share/classfile/classPrinter.cpp index 3ed0a5e9840..6cf89f7357f 100644 --- a/src/hotspot/share/classfile/classPrinter.cpp +++ b/src/hotspot/share/classfile/classPrinter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -224,6 +224,7 @@ void ClassPrinter::print_flags_help(outputStream* os) { os->print_cr(" 0x%02x - print info for invokehandle", PRINT_METHOD_HANDLE); os->print_cr(" 0x%02x - print details of the C++ and Java objects that represent classes", PRINT_CLASS_DETAILS); os->print_cr(" 0x%02x - print details of the C++ objects that represent methods", PRINT_METHOD_DETAILS); + os->print_cr(" 0x%02x - print MethodData", PRINT_METHOD_DATA); os->cr(); } diff --git a/src/hotspot/share/classfile/classPrinter.hpp b/src/hotspot/share/classfile/classPrinter.hpp index 470e82ddc0e..b09a1a1ef3b 100644 --- a/src/hotspot/share/classfile/classPrinter.hpp +++ b/src/hotspot/share/classfile/classPrinter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,7 @@ public: PRINT_METHOD_HANDLE = 1 << 4, // extra information for invokehandle PRINT_CLASS_DETAILS = 1 << 5, // print details of the C++ and Java objects that represent classes PRINT_METHOD_DETAILS = 1 << 6, // print details of the C++ objects that represent methods + PRINT_METHOD_DATA = 1 << 7, // print MethodData - requires MDO lock }; static bool has_mode(int flags, Mode mode) { return (flags & static_cast(mode)) != 0; diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp index 81f2951289d..1711c5f8cd3 100644 --- a/src/hotspot/share/classfile/compactHashtable.hpp +++ b/src/hotspot/share/classfile/compactHashtable.hpp @@ -307,14 +307,9 @@ public: template inline void iterate(ITER* iter) const { iterate([&](V v) { iter->do_value(v); }); } - template - inline void iterate(const Function& function) const { // lambda enabled API - iterate(const_cast(function)); - } - // Iterate through the values in the table, stopping when the lambda returns false. template - inline void iterate(Function& function) const { // lambda enabled API + inline void iterate(Function function) const { // lambda enabled API for (u4 i = 0; i < _bucket_count; i++) { u4 bucket_info = _buckets[i]; u4 bucket_offset = BUCKET_OFFSET(bucket_info); diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.cpp b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp index a87e12edc96..adf4e1e63fa 100644 --- a/src/hotspot/share/classfile/fieldLayoutBuilder.cpp +++ b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ LayoutRawBlock::LayoutRawBlock(Kind kind, int size) : _next_block(nullptr), _prev_block(nullptr), - _kind(kind), + _block_kind(kind), _offset(-1), _alignment(1), _size(size), @@ -52,7 +52,7 @@ LayoutRawBlock::LayoutRawBlock(Kind kind, int size) : LayoutRawBlock::LayoutRawBlock(int index, Kind kind, int size, int alignment, bool is_reference) : _next_block(nullptr), _prev_block(nullptr), - _kind(kind), + _block_kind(kind), _offset(-1), _alignment(alignment), _size(size), @@ -148,8 +148,8 @@ void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass, b LayoutRawBlock* FieldLayout::first_field_block() { LayoutRawBlock* block = _start; - while (block->kind() != LayoutRawBlock::INHERITED && block->kind() != LayoutRawBlock::REGULAR - && block->kind() != LayoutRawBlock::FLATTENED && block->kind() != LayoutRawBlock::PADDING) { + while (block->block_kind() != LayoutRawBlock::INHERITED && block->block_kind() != LayoutRawBlock::REGULAR + && block->block_kind() != LayoutRawBlock::FLATTENED && block->block_kind() != LayoutRawBlock::PADDING) { block = block->next_block(); } return block; @@ -190,7 +190,7 @@ void FieldLayout::add(GrowableArray* list, LayoutRawBlock* star assert(cursor != nullptr, "Sanity check"); last_search_success = true; while (cursor != start) { - if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(b->size(), b->alignment())) { + if (cursor->block_kind() == LayoutRawBlock::EMPTY && cursor->fit(b->size(), b->alignment())) { if (candidate == nullptr || cursor->size() < candidate->size()) { candidate = cursor; } @@ -202,7 +202,7 @@ void FieldLayout::add(GrowableArray* list, LayoutRawBlock* star last_search_success = false; } assert(candidate != nullptr, "Candidate must not be null"); - assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block"); + assert(candidate->block_kind() == LayoutRawBlock::EMPTY, "Candidate must 
be an empty block"); assert(candidate->fit(b->size(), b->alignment()), "Candidate must be able to store the block"); } @@ -221,7 +221,7 @@ void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutR while (slot != nullptr) { if ((slot->offset() <= block->offset() && (slot->offset() + slot->size()) > block->offset()) || slot == _last){ - assert(slot->kind() == LayoutRawBlock::EMPTY, "Matching slot must be an empty slot"); + assert(slot->block_kind() == LayoutRawBlock::EMPTY, "Matching slot must be an empty slot"); assert(slot->size() >= block->offset() + block->size() ,"Matching slot must be big enough"); if (slot->offset() < block->offset()) { int adjustment = block->offset() - slot->offset(); @@ -261,7 +261,7 @@ void FieldLayout::add_contiguously(GrowableArray* list, LayoutR } else { LayoutRawBlock* first = list->at(0); candidate = last_block()->prev_block(); - while (candidate->kind() != LayoutRawBlock::EMPTY || !candidate->fit(size, first->alignment())) { + while (candidate->block_kind() != LayoutRawBlock::EMPTY || !candidate->fit(size, first->alignment())) { if (candidate == start) { candidate = last_block(); break; @@ -269,7 +269,7 @@ void FieldLayout::add_contiguously(GrowableArray* list, LayoutR candidate = candidate->prev_block(); } assert(candidate != nullptr, "Candidate must not be null"); - assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block"); + assert(candidate->block_kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block"); assert(candidate->fit(size, first->alignment()), "Candidate must be able to store the whole contiguous block"); } @@ -281,7 +281,7 @@ void FieldLayout::add_contiguously(GrowableArray* list, LayoutR } LayoutRawBlock* FieldLayout::insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block) { - assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks"); + assert(slot->block_kind() == LayoutRawBlock::EMPTY, "Blocks can only be 
inserted in empty blocks"); if (slot->offset() % block->alignment() != 0) { int adjustment = block->alignment() - (slot->offset() % block->alignment()); LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment); @@ -362,7 +362,7 @@ void FieldLayout::fill_holes(const InstanceKlass* super_klass) { b = b->next_block(); } assert(b->next_block() == nullptr, "Invariant at this point"); - assert(b->kind() != LayoutRawBlock::EMPTY, "Sanity check"); + assert(b->block_kind() != LayoutRawBlock::EMPTY, "Sanity check"); // If the super class has @Contended annotation, a padding block is // inserted at the end to ensure that fields from the subclasses won't share @@ -384,7 +384,7 @@ void FieldLayout::fill_holes(const InstanceKlass* super_klass) { } LayoutRawBlock* FieldLayout::insert(LayoutRawBlock* slot, LayoutRawBlock* block) { - assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks"); + assert(slot->block_kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks"); assert(slot->offset() % block->alignment() == 0, "Incompatible alignment"); block->set_offset(slot->offset()); slot->set_offset(slot->offset() + block->size()); @@ -425,7 +425,7 @@ void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlas ResourceMark rm; LayoutRawBlock* b = _blocks; while(b != _last) { - switch(b->kind()) { + switch(b->block_kind()) { case LayoutRawBlock::REGULAR: { FieldInfo* fi = _field_info->adr_at(b->field_index()); output->print_cr(" @%d \"%s\" %s %d/%d %s", @@ -596,11 +596,13 @@ void FieldLayoutBuilder::regular_field_sorting() { } } -void FieldLayoutBuilder::insert_contended_padding(LayoutRawBlock* slot) { +LayoutRawBlock* FieldLayoutBuilder::insert_contended_padding(LayoutRawBlock* slot) { + LayoutRawBlock* padding = nullptr; if (ContendedPaddingWidth > 0) { - LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth); + padding = new 
LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth); _layout->insert(slot, padding); } + return padding; } // Computation of regular classes layout is an evolution of the previous default layout @@ -620,10 +622,14 @@ void FieldLayoutBuilder::compute_regular_layout() { regular_field_sorting(); if (_is_contended) { - _layout->set_start(_layout->last_block()); // insertion is currently easy because the current strategy doesn't try to fill holes // in super classes layouts => the _start block is by consequence the _last_block - insert_contended_padding(_layout->start()); + _layout->set_start(_layout->last_block()); + LayoutRawBlock* padding = insert_contended_padding(_layout->start()); + if (padding != nullptr) { + // Setting the padding block as start ensures we do not insert past it. + _layout->set_start(padding); + } need_tail_padding = true; } @@ -639,7 +645,13 @@ void FieldLayoutBuilder::compute_regular_layout() { for (int i = 0; i < _contended_groups.length(); i++) { FieldGroup* cg = _contended_groups.at(i); LayoutRawBlock* start = _layout->last_block(); - insert_contended_padding(start); + LayoutRawBlock* padding = insert_contended_padding(start); + + // Do not insert fields past the padding block. + if (padding != nullptr) { + start = padding; + } + _layout->add(cg->primitive_fields(), start); _layout->add(cg->oop_fields(), start); need_tail_padding = true; diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp index 82bbaefc623..a45131ec9a3 100644 --- a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp +++ b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ class LayoutRawBlock : public ResourceObj { private: LayoutRawBlock* _next_block; LayoutRawBlock* _prev_block; - Kind _kind; + Kind _block_kind; int _offset; int _alignment; int _size; @@ -79,7 +79,7 @@ class LayoutRawBlock : public ResourceObj { void set_next_block(LayoutRawBlock* next) { _next_block = next; } LayoutRawBlock* prev_block() const { return _prev_block; } void set_prev_block(LayoutRawBlock* prev) { _prev_block = prev; } - Kind kind() const { return _kind; } + Kind block_kind() const { return _block_kind; } int offset() const { assert(_offset >= 0, "Must be initialized"); return _offset; @@ -173,7 +173,7 @@ class FieldLayout : public ResourceObj { LayoutRawBlock* first_empty_block() { LayoutRawBlock* block = _start; - while (block->kind() != LayoutRawBlock::EMPTY) { + while (block->block_kind() != LayoutRawBlock::EMPTY) { block = block->next_block(); } return block; @@ -250,7 +250,7 @@ class FieldLayoutBuilder : public ResourceObj { void build_layout(); void compute_regular_layout(); - void insert_contended_padding(LayoutRawBlock* slot); + LayoutRawBlock* insert_contended_padding(LayoutRawBlock* slot); private: void prologue(); diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp index b5b8aa4ef55..c7fadeaea9b 100644 --- a/src/hotspot/share/classfile/moduleEntry.cpp +++ b/src/hotspot/share/classfile/moduleEntry.cpp @@ -23,7 +23,6 @@ */ #include "cds/aotClassLocation.hpp" -#include "cds/aotGrowableArray.inline.hpp" #include "cds/archiveBuilder.hpp" #include "cds/archiveUtils.hpp" #include "cds/cdsConfig.hpp" @@ -168,7 +167,7 @@ void ModuleEntry::add_read(ModuleEntry* m) { } else { if (reads() == nullptr) { // Lazily create a module's reads list - AOTGrowableArray* new_reads = new (mtModule) AOTGrowableArray(MODULE_READS_SIZE, mtModule); + GrowableArray* new_reads = new (mtModule) GrowableArray(MODULE_READS_SIZE, 
mtModule); set_reads(new_reads); } diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp index 1a0251a2c2a..10dec73e9fa 100644 --- a/src/hotspot/share/classfile/moduleEntry.hpp +++ b/src/hotspot/share/classfile/moduleEntry.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_CLASSFILE_MODULEENTRY_HPP #define SHARE_CLASSFILE_MODULEENTRY_HPP -#include "cds/aotGrowableArray.hpp" #include "jni.h" #include "memory/metaspaceClosureType.hpp" #include "oops/oopHandle.hpp" @@ -70,7 +69,7 @@ private: // for shared classes from this module Symbol* _name; // name of this module ClassLoaderData* _loader_data; - AOTGrowableArray* _reads; // list of modules that are readable by this module + GrowableArray* _reads; // list of modules that are readable by this module Symbol* _version; // module version number Symbol* _location; // module location @@ -118,10 +117,10 @@ public: bool can_read(ModuleEntry* m) const; bool has_reads_list() const; - AOTGrowableArray* reads() const { + GrowableArray* reads() const { return _reads; } - void set_reads(AOTGrowableArray* r) { + void set_reads(GrowableArray* r) { _reads = r; } void pack_reads() { diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp index 3e61f2e3a3e..3eb50fcb5a7 100644 --- a/src/hotspot/share/classfile/packageEntry.cpp +++ b/src/hotspot/share/classfile/packageEntry.cpp @@ -22,7 +22,6 @@ * */ -#include "cds/aotGrowableArray.inline.hpp" #include "cds/aotMetaspace.hpp" #include "cds/archiveBuilder.hpp" #include "cds/archiveUtils.hpp" @@ -83,7 +82,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) { if (!has_qual_exports_list()) { // Lazily create a package's qualified exports list. // Initial size is small, do not anticipate export lists to be large. 
- _qualified_exports = new (mtModule) AOTGrowableArray(QUAL_EXP_SIZE, mtModule); + _qualified_exports = new (mtModule) GrowableArray(QUAL_EXP_SIZE, mtModule); } // Determine, based on this newly established export to module m, diff --git a/src/hotspot/share/classfile/packageEntry.hpp b/src/hotspot/share/classfile/packageEntry.hpp index 7b174a92287..e064e53b263 100644 --- a/src/hotspot/share/classfile/packageEntry.hpp +++ b/src/hotspot/share/classfile/packageEntry.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP #define SHARE_CLASSFILE_PACKAGEENTRY_HPP -#include "cds/aotGrowableArray.hpp" #include "classfile/moduleEntry.hpp" #include "memory/metaspaceClosureType.hpp" #include "oops/symbol.hpp" @@ -116,7 +115,7 @@ private: bool _must_walk_exports; // Contains list of modules this package is qualifiedly exported to. Access // to this list is protected by the Module_lock. - AOTGrowableArray* _qualified_exports; + GrowableArray* _qualified_exports; JFR_ONLY(DEFINE_TRACE_ID_FIELD;) // Initial size of a package entry's list of qualified exports. diff --git a/src/hotspot/share/classfile/stackMapTableFormat.hpp b/src/hotspot/share/classfile/stackMapTableFormat.hpp index 2b89c53278a..4906f4b9d80 100644 --- a/src/hotspot/share/classfile/stackMapTableFormat.hpp +++ b/src/hotspot/share/classfile/stackMapTableFormat.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -221,9 +221,11 @@ class stack_map_frame { class same_frame : public stack_map_frame { private: static int frame_type_to_offset_delta(u1 frame_type) { - return frame_type + 1; } + return frame_type + 1; + } static u1 offset_delta_to_frame_type(int offset_delta) { - return checked_cast(offset_delta - 1); } + return checked_cast(offset_delta - 1); + } public: @@ -327,9 +329,11 @@ class same_locals_1_stack_item_frame : public stack_map_frame { address type_addr() const { return frame_type_addr() + sizeof(u1); } static int frame_type_to_offset_delta(u1 frame_type) { - return frame_type - 63; } + return frame_type - 63; + } static u1 offset_delta_to_frame_type(int offset_delta) { - return (u1)(offset_delta + 63); } + return (u1)(offset_delta + 63); + } public: static bool is_frame_type(u1 tag) { @@ -657,9 +661,11 @@ class full_frame : public stack_map_frame { address num_locals_addr() const { return offset_delta_addr() + sizeof(u2); } address locals_addr() const { return num_locals_addr() + sizeof(u2); } address stack_slots_addr(address end_of_locals) const { - return end_of_locals; } + return end_of_locals; + } address stack_addr(address end_of_locals) const { - return stack_slots_addr(end_of_locals) + sizeof(u2); } + return stack_slots_addr(end_of_locals) + sizeof(u2); + } enum { _frame_id = 255 }; @@ -930,11 +936,14 @@ class stack_map_table { class stack_map_table_attribute { private: address name_index_addr() const { - return (address)this; } + return (address)this; + } address attribute_length_addr() const { - return name_index_addr() + sizeof(u2); } + return name_index_addr() + sizeof(u2); + } address stack_map_table_addr() const { - return attribute_length_addr() + sizeof(u4); } + return attribute_length_addr() + sizeof(u4); + } NONCOPYABLE(stack_map_table_attribute); protected: @@ -948,9 +957,11 @@ class stack_map_table_attribute { } u2 name_index() const { - return 
Bytes::get_Java_u2(name_index_addr()); } + return Bytes::get_Java_u2(name_index_addr()); + } u4 attribute_length() const { - return Bytes::get_Java_u4(attribute_length_addr()); } + return Bytes::get_Java_u4(attribute_length_addr()); + } stack_map_table* table() const { return stack_map_table::at(stack_map_table_addr()); } diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index 2b8b7780a41..c3f60487b9c 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -946,7 +946,7 @@ void StringTable::init_shared_table() { // so we are all good. // - If there's a reference to it, we will report an error inside HeapShared.cpp and // dumping will fail. - HeapShared::add_to_dumped_interned_strings(string); + HeapShared::archive_interned_string(string); } n++; return true; diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp index 0b47c749df8..8483551fd4f 100644 --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -416,7 +416,7 @@ static inline void log_circularity_error(Symbol* name, PlaceholderEntry* probe) // // resolve_with_circularity_detection adds a DETECT_CIRCULARITY placeholder to the placeholder table before calling // resolve_instance_class_or_null. ClassCircularityError is detected when a DETECT_CIRCULARITY or LOAD_INSTANCE -// placeholder for the same thread, class, classloader is found. +// placeholder for the same thread, class, and classloader is found. // This can be seen with logging option: -Xlog:class+load+placeholders=debug. 
// InstanceKlass* SystemDictionary::resolve_with_circularity_detection(Symbol* class_name, diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index cfb20412ab8..fd30fc6766f 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -89,11 +89,9 @@ DEBUG_ONLY(bool SystemDictionaryShared::_class_loading_may_happen = true;) #ifdef ASSERT static void check_klass_after_loading(const Klass* k) { -#ifdef _LP64 - if (k != nullptr && UseCompressedClassPointers) { + if (k != nullptr) { CompressedKlassPointers::check_encodable(k); } -#endif } #endif @@ -204,6 +202,20 @@ DumpTimeClassInfo* SystemDictionaryShared::get_info_locked(InstanceKlass* k) { return info; } +void SystemDictionaryShared::check_code_source(InstanceKlass* ik, const ClassFileStream* cfs) { + if (CDSConfig::is_dumping_preimage_static_archive() && !is_builtin_loader(ik->class_loader_data())) { + if (cfs == nullptr || cfs->source() == nullptr || strncmp(cfs->source(), "file:", 5) != 0) { + // AOT cache filtering: + // For non-built-in loaders, cache only the classes that have a file: code source, so + // we can avoid caching dynamically generated classes that are likely to change from + // run to run. This is similar to the filtering in ClassListWriter::write_to_stream() + // for the classic CDS static archive. + SystemDictionaryShared::log_exclusion(ik, "Not loaded from \"file:\" code source"); + SystemDictionaryShared::set_excluded(ik); + } + } +} + bool SystemDictionaryShared::should_be_excluded_impl(InstanceKlass* k, DumpTimeClassInfo* info) { assert_lock_strong(DumpTimeTable_lock); @@ -373,11 +385,6 @@ bool SystemDictionaryShared::is_jfr_event_class(InstanceKlass *k) { return false; } -bool SystemDictionaryShared::is_early_klass(InstanceKlass* ik) { - DumpTimeClassInfo* info = _dumptime_table->get(ik); - return (info != nullptr) ? 
info->is_early_klass() : false; -} - bool SystemDictionaryShared::check_self_exclusion(InstanceKlass* k) { bool log_warning = false; const char* error = check_self_exclusion_helper(k, log_warning); diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp index 2619a642fd1..c837a386344 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.hpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -199,7 +199,6 @@ private: static void iterate_verification_constraint_names(InstanceKlass* k, DumpTimeClassInfo* info, Function func); public: - static bool is_early_klass(InstanceKlass* k); // Was k loaded while JvmtiExport::is_early_phase()==true static bool has_archived_enum_objs(InstanceKlass* ik); static void set_has_archived_enum_objs(InstanceKlass* ik); @@ -236,6 +235,7 @@ public: static void update_shared_entry(InstanceKlass* klass, int id); static void set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs); + static void check_code_source(InstanceKlass* ik, const ClassFileStream* cfs) NOT_CDS_RETURN; static InstanceKlass* lookup_from_stream(Symbol* class_name, Handle class_loader, Handle protection_domain, diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp index 76d09161fdd..48be24c20dc 100644 --- a/src/hotspot/share/classfile/verifier.cpp +++ b/src/hotspot/share/classfile/verifier.cpp @@ -1607,12 +1607,12 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) { case Bytecodes::_if_acmpeq : case Bytecodes::_if_acmpne : current_frame.pop_stack( - VerificationType::reference_check(), 
CHECK_VERIFY(this)); + object_type(), CHECK_VERIFY(this)); // fall through case Bytecodes::_ifnull : case Bytecodes::_ifnonnull : current_frame.pop_stack( - VerificationType::reference_check(), CHECK_VERIFY(this)); + object_type(), CHECK_VERIFY(this)); stackmap_table.check_jump_target (¤t_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this)); no_control_flow = false; break; diff --git a/src/hotspot/share/classfile/vmIntrinsics.hpp b/src/hotspot/share/classfile/vmIntrinsics.hpp index 67817682ced..3f85fd16b61 100644 --- a/src/hotspot/share/classfile/vmIntrinsics.hpp +++ b/src/hotspot/share/classfile/vmIntrinsics.hpp @@ -368,10 +368,10 @@ class methodHandle; do_intrinsic(_inflateStringB, java_lang_StringLatin1, inflate_name, inflateB_signature, F_S) \ do_signature(inflateB_signature, "([BI[BII)V") \ do_intrinsic(_toBytesStringU, java_lang_StringUTF16, toBytes_name, toBytesU_signature, F_S) \ - do_name( toBytes_name, "toBytes") \ + do_name( toBytes_name, "toBytes0") \ do_signature(toBytesU_signature, "([CII)[B") \ do_intrinsic(_getCharsStringU, java_lang_StringUTF16, getCharsU_name, getCharsU_signature, F_S) \ - do_name( getCharsU_name, "getChars") \ + do_name( getCharsU_name, "getChars0") \ do_signature(getCharsU_signature, "([BII[CI)V") \ do_intrinsic(_getCharStringU, java_lang_StringUTF16, getChar_name, getCharStringU_signature, F_S) \ do_signature(getCharStringU_signature, "([BI)C") \ @@ -469,6 +469,9 @@ class methodHandle; do_intrinsic(_Reference_clear0, java_lang_ref_Reference, clear0_name, void_method_signature, F_RN) \ do_intrinsic(_PhantomReference_clear0, java_lang_ref_PhantomReference, clear0_name, void_method_signature, F_RN) \ \ + do_intrinsic(_Reference_reachabilityFence, java_lang_ref_Reference, reachabilityFence_name, object_void_signature, F_S) \ + do_name(reachabilityFence_name, "reachabilityFence") \ + \ /* support for com.sun.crypto.provider.AES_Crypt and some of its callers */ \ do_class(com_sun_crypto_provider_aescrypt, 
"com/sun/crypto/provider/AES_Crypt") \ do_intrinsic(_aescrypt_encryptBlock, com_sun_crypto_provider_aescrypt, encryptBlock_name, byteArray_int_byteArray_int_signature, F_R) \ diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp index 2ae42bebcfd..33d00b93365 100644 --- a/src/hotspot/share/classfile/vmSymbols.hpp +++ b/src/hotspot/share/classfile/vmSymbols.hpp @@ -702,6 +702,7 @@ class SerializeClosure; template(appendToClassPathForInstrumentation_name, "appendToClassPathForInstrumentation") \ do_alias(appendToClassPathForInstrumentation_signature, string_void_signature) \ template(serializePropertiesToByteArray_name, "serializePropertiesToByteArray") \ + template(serializeSecurityPropertiesToByteArray_name, "serializeSecurityPropertiesToByteArray") \ template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \ template(encodeThrowable_name, "encodeThrowable") \ template(encodeThrowable_signature, "(Ljava/lang/Throwable;JI)I") \ diff --git a/src/hotspot/share/code/aotCodeCache.cpp b/src/hotspot/share/code/aotCodeCache.cpp index e5f68afc51d..d4f12936e96 100644 --- a/src/hotspot/share/code/aotCodeCache.cpp +++ b/src/hotspot/share/code/aotCodeCache.cpp @@ -33,13 +33,18 @@ #include "classfile/javaAssertions.hpp" #include "code/aotCodeCache.hpp" #include "code/codeCache.hpp" +#include "gc/shared/barrierSetAssembler.hpp" +#include "gc/shared/barrierSetNMethod.hpp" #include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/gcConfig.hpp" #include "logging/logStream.hpp" #include "memory/memoryReserver.hpp" +#include "prims/jvmtiThreadState.hpp" +#include "prims/upcallLinker.hpp" #include "runtime/deoptimization.hpp" #include "runtime/flags/flagSetting.hpp" #include "runtime/globals_extension.hpp" +#include "runtime/icache.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" @@ -73,11 +78,23 @@ const char* aot_code_entry_kind_name[] = { #undef 
DECL_KIND_STRING }; +// Stream to printing AOTCodeCache loading failure. +// Print to error channel when -XX:AOTMode is set to "on" +static LogStream& load_failure_log() { + static LogStream err_stream(LogLevel::Error, LogTagSetMapping::tagset()); + static LogStream dbg_stream(LogLevel::Debug, LogTagSetMapping::tagset()); + if (RequireSharedSpaces) { + return err_stream; + } else { + return dbg_stream; + } +} + static void report_load_failure() { if (AbortVMOnAOTCodeFailure) { vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr); } - log_info(aot, codecache, init)("Unable to use AOT Code Cache."); + load_failure_log().print_cr("Unable to use AOT Code Cache."); AOTCodeCache::disable_caching(); } @@ -86,7 +103,7 @@ static void report_store_failure() { tty->print_cr("Unable to create AOT Code Cache."); vm_abort(false); } - log_info(aot, codecache, exit)("Unable to create AOT Code Cache."); + log_error(aot, codecache, exit)("Unable to create AOT Code Cache."); AOTCodeCache::disable_caching(); } @@ -156,10 +173,13 @@ static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) { } else if (kind == AOTCodeEntry::C1Blob) { assert(StubInfo::is_c1(static_cast(id)), "not a c1 blob id %d", id); return id; - } else { - // kind must be AOTCodeEntry::C2Blob + } else if (kind == AOTCodeEntry::C2Blob) { assert(StubInfo::is_c2(static_cast(id)), "not a c2 blob id %d", id); return id; + } else { + // kind must be AOTCodeEntry::StubGenBlob + assert(StubInfo::is_stubgen(static_cast(id)), "not a stubgen blob id %d", id); + return id; } } @@ -184,9 +204,6 @@ void AOTCodeCache::initialize() { return; // AOTCache must be specified to dump and use AOT code } - // Disable stubs caching until JDK-8357398 is fixed. - FLAG_SET_ERGO(AOTStubCaching, false); - if (VerifyOops) { // Disable AOT stubs caching when VerifyOops flag is on. 
// Verify oops code generated a lot of C strings which overflow @@ -284,11 +301,24 @@ bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) { return true; } -void AOTCodeCache::close() { +// Called after continuations_init() when continuation stub callouts +// have been initialized +void AOTCodeCache::init3() { + if (opened_cache == nullptr) { + return; + } + // initialize external routines for continuations so we can save + // generated continuation blob that references them + AOTCodeAddressTable* table = opened_cache->_table; + assert(table != nullptr, "should be initialized already"); + table->init_extrs2(); +} + +void AOTCodeCache::dump() { if (is_on()) { - delete _cache; // Free memory - _cache = nullptr; - opened_cache = nullptr; + assert(is_on_for_dump(), "should be called only when dumping AOT code"); + MutexLocker ml(Compile_lock); + _cache->finish_write(); } } @@ -304,7 +334,6 @@ AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) : _store_size(0), _for_use(is_using), _for_dump(is_dumping), - _closing(false), _failed(false), _lookup_failed(false), _table(nullptr), @@ -343,6 +372,7 @@ AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) : log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count()); log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count()); log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count()); + log_debug(aot, codecache, init)(" StubGen Blobs: total=%d", _load_header->stubgen_blobs_count()); log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count()); log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count()); log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size()); @@ -360,82 +390,80 @@ AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) : _table = new AOTCodeAddressTable(); } -void 
AOTCodeCache::init_early_stubs_table() { - AOTCodeAddressTable* table = addr_table(); - if (table != nullptr) { - table->init_early_stubs(); +void AOTCodeCache::add_stub_entries(StubId stub_id, address start, GrowableArray
*entries, int begin_idx) { + EntryId entry_id = StubInfo::entry_base(stub_id); + add_stub_entry(entry_id, start); + // skip past first entry + entry_id = StubInfo::next_in_stub(stub_id, entry_id); + // now check for any more entries + int count = StubInfo::entry_count(stub_id) - 1; + assert(start != nullptr, "invalid start address for stub %s", StubInfo::name(stub_id)); + assert(entries == nullptr || begin_idx + count <= entries->length(), "sanity"); + // write any extra entries + for (int i = 0; i < count; i++) { + assert(entry_id != EntryId::NO_ENTRYID, "not enough entries for stub %s", StubInfo::name(stub_id)); + address a = entries->at(begin_idx + i); + add_stub_entry(entry_id, a); + entry_id = StubInfo::next_in_stub(stub_id, entry_id); + } + assert(entry_id == EntryId::NO_ENTRYID, "too many entries for stub %s", StubInfo::name(stub_id)); +} + +void AOTCodeCache::add_stub_entry(EntryId entry_id, address a) { + if (a != nullptr) { + if (_table != nullptr) { + log_trace(aot, codecache, stubs)("Publishing stub entry %s at address " INTPTR_FORMAT, StubInfo::name(entry_id), p2i(a)); + return _table->add_stub_entry(entry_id, a); + } } } -void AOTCodeCache::init_shared_blobs_table() { +void AOTCodeCache::set_shared_stubs_complete() { AOTCodeAddressTable* table = addr_table(); if (table != nullptr) { - table->init_shared_blobs(); + table->set_shared_stubs_complete(); } } -void AOTCodeCache::init_early_c1_table() { +void AOTCodeCache::set_c1_stubs_complete() { AOTCodeAddressTable* table = addr_table(); if (table != nullptr) { - table->init_early_c1(); + table->set_c1_stubs_complete(); } } -AOTCodeCache::~AOTCodeCache() { - if (_closing) { - return; // Already closed +void AOTCodeCache::set_c2_stubs_complete() { + AOTCodeAddressTable* table = addr_table(); + if (table != nullptr) { + table->set_c2_stubs_complete(); } - // Stop any further access to cache. 
- _closing = true; +} - MutexLocker ml(Compile_lock); - if (for_dump()) { // Finalize cache - finish_write(); - } - _load_buffer = nullptr; - if (_C_store_buffer != nullptr) { - FREE_C_HEAP_ARRAY(char, _C_store_buffer); - _C_store_buffer = nullptr; - _store_buffer = nullptr; - } - if (_table != nullptr) { - MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag); - delete _table; - _table = nullptr; +void AOTCodeCache::set_stubgen_stubs_complete() { + AOTCodeAddressTable* table = addr_table(); + if (table != nullptr) { + table->set_stubgen_stubs_complete(); } } void AOTCodeCache::Config::record(uint cpu_features_offset) { - _flags = 0; -#ifdef ASSERT - _flags |= debugVM; -#endif - if (UseCompressedOops) { - _flags |= compressedOops; - } - if (UseCompressedClassPointers) { - _flags |= compressedClassPointers; - } - if (UseTLAB) { - _flags |= useTLAB; - } - if (JavaAssertions::systemClassDefault()) { - _flags |= systemClassAssertions; - } - if (JavaAssertions::userClassDefault()) { - _flags |= userClassAssertions; - } - if (EnableContended) { - _flags |= enableContendedPadding; - } - if (RestrictContended) { - _flags |= restrictContendedPadding; - } - _compressedOopShift = CompressedOops::shift(); + +#define AOTCODECACHE_SAVE_VAR(type, name) _saved_ ## name = name; +#define AOTCODECACHE_SAVE_FUN(type, name, fun) _saved_ ## name = fun; + + AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_SAVE_VAR, AOTCODECACHE_SAVE_FUN); + + // Special configs that cannot be checked with macros _compressedOopBase = CompressedOops::base(); - _compressedKlassShift = CompressedKlassPointers::shift(); - _contendedPaddingWidth = ContendedPaddingWidth; - _gc = (uint)Universe::heap()->kind(); + +#if defined(X86) && !defined(ZERO) + _useUnalignedLoadStores = UseUnalignedLoadStores; +#endif + +#if defined(AARCH64) && !defined(ZERO) + _avoidUnalignedAccesses = AvoidUnalignedAccesses; +#endif + _cpu_features_offset = cpu_features_offset; } @@ -466,78 +494,114 @@ bool 
AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const { } } } else { - if (log.is_enabled()) { + if (load_failure_log().is_enabled()) { ResourceMark rm; // required for stringStream::as_string() stringStream ss; char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size()); VM_Version::store_cpu_features(runtime_cpu_features); VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss); - log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string()); + load_failure_log().print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string()); } return false; } return true; } -bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const { - // First checks affect all cached AOT code -#ifdef ASSERT - if ((_flags & debugVM) == 0) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM"); - return false; - } -#else - if ((_flags & debugVM) != 0) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM"); - return false; - } +#define AOTCODECACHE_DISABLED_MSG "AOT Code Cache disabled: it was created with %s = " + +// Special case, print "GC = ..." to be more understandable. +inline void log_config_mismatch(CollectedHeap::Name saved, CollectedHeap::Name current, const char* name/*unused*/) { + load_failure_log().print_cr("AOT Code Cache disabled: it was created with GC = \"%s\" vs current \"%s\"", + GCConfig::hs_err_name(saved), GCConfig::hs_err_name(current)); +} + +inline void log_config_mismatch(bool saved, bool current, const char* name) { + load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%s vs current %s", name, + saved ? "true" : "false", current ? 
"true" : "false"); +} + +inline void log_config_mismatch(int saved, int current, const char* name) { + load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%d vs current %d", name, saved, current); +} + +inline void log_config_mismatch(uint saved, uint current, const char* name) { + load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%u vs current %u", name, saved, current); +} + +#ifdef _LP64 +inline void log_config_mismatch(intx saved, intx current, const char* name) { + load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zd vs current %zd", name, saved, current); +} + +inline void log_config_mismatch(uintx saved, uintx current, const char* name) { + load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zu vs current %zu", name, saved, current); +} #endif - CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc; - if (aot_gc != Universe::heap()->kind()) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name()); +template +bool check_config(T saved, T current, const char* name) { + if (saved != current) { + log_config_mismatch(saved, current, name); return false; + } else { + return true; } +} - if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? 
"false" : "true"); - return false; - } - if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift()); - return false; - } - - // The following checks do not affect AOT adapters caching - - if (((_flags & compressedOops) != 0) != UseCompressedOops) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true"); - AOTStubCaching = false; - } - if (_compressedOopShift != (uint)CompressedOops::shift()) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift()); - AOTStubCaching = false; - } - - // This should be the last check as it only disables AOTStubCaching - if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) { - log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base()); - AOTStubCaching = false; - } - +bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const { + // check CPU features before checking flags that may be + // auto-configured in response to them if (!verify_cpu_features(cache)) { return false; } + + // Tests for config options which might affect validity of adapters, + // stubs or nmethods. Currently we take a pessemistic stand and + // drop the whole cache if any of these are changed. 
+ +#define AOTCODECACHE_CHECK_VAR(type, name) \ + if (!check_config(_saved_ ## name, name, #name)) { return false; } +#define AOTCODECACHE_CHECK_FUN(type, name, fun) \ + if (!check_config(_saved_ ## name, fun, #fun)) { return false; } + + AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_CHECK_VAR, AOTCODECACHE_CHECK_FUN); + + // Special configs that cannot be checked with macros + + if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) { + load_failure_log().print_cr("AOT Code Cache disabled: incompatible CompressedOops::base(): %p vs current %p", + _compressedOopBase, CompressedOops::base()); + return false; + } + +#if defined(X86) && !defined(ZERO) + // switching off UseUnalignedLoadStores can affect validity of fill + // stubs + if (_useUnalignedLoadStores && !UseUnalignedLoadStores) { + log_config_mismatch(_useUnalignedLoadStores, UseUnalignedLoadStores, "UseUnalignedLoadStores"); + return false; + } +#endif // defined(X86) && !defined(ZERO) + +#if defined(AARCH64) && !defined(ZERO) + // switching on AvoidUnalignedAccesses may affect validity of array + // copy stubs and nmethods + if (!_avoidUnalignedAccesses && AvoidUnalignedAccesses) { + log_config_mismatch(_avoidUnalignedAccesses, AvoidUnalignedAccesses, "AvoidUnalignedAccesses"); + return false; + } +#endif // defined(AARCH64) && !defined(ZERO) + return true; } bool AOTCodeCache::Header::verify(uint load_size) const { if (_version != AOT_CODE_VERSION) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version); + load_failure_log().print_cr("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version); return false; } if (load_size < _cache_size) { - log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size); + 
load_failure_log().print_cr("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size); return false; } return true; @@ -571,6 +635,13 @@ AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) { _load_buffer = cache->cache_buffer(); _read_position = 0; _lookup_failed = false; + _name = nullptr; + _reloc_data = nullptr; + _reloc_count = 0; + _oop_maps = nullptr; + _entry_kind = AOTCodeEntry::None; + _stub_data = nullptr; + _id = -1; } void AOTCodeReader::set_read_position(uint pos) { @@ -788,6 +859,7 @@ bool AOTCodeCache::finish_write() { AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry uint adapters_count = 0; uint shared_blobs_count = 0; + uint stubgen_blobs_count = 0; uint C1_blobs_count = 0; uint C2_blobs_count = 0; uint max_size = 0; @@ -815,6 +887,8 @@ bool AOTCodeCache::finish_write() { adapters_count++; } else if (kind == AOTCodeEntry::SharedBlob) { shared_blobs_count++; + } else if (kind == AOTCodeEntry::StubGenBlob) { + stubgen_blobs_count++; } else if (kind == AOTCodeEntry::C1Blob) { C1_blobs_count++; } else if (kind == AOTCodeEntry::C2Blob) { @@ -851,6 +925,7 @@ bool AOTCodeCache::finish_write() { log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count); log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count); + log_debug(aot, codecache, exit)(" StubGen Blobs: total=%d", stubgen_blobs_count); log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count); log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count); log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size); @@ -860,7 +935,8 @@ bool AOTCodeCache::finish_write() { header->init(size, (uint)strings_count, strings_offset, entries_count, new_entries_offset, adapters_count, shared_blobs_count, - C1_blobs_count, C2_blobs_count, cpu_features_offset); + stubgen_blobs_count, C1_blobs_count, + 
C2_blobs_count, cpu_features_offset); log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count); } @@ -869,19 +945,53 @@ bool AOTCodeCache::finish_write() { //------------------Store/Load AOT code ---------------------- -bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) { +bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data, CodeBuffer* code_buffer) { + assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind); + + // we only expect stub data and a code buffer for a multi stub blob + assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr), + "entry_kind %d does not match stub_data pointer %p", + entry_kind, stub_data); + + assert((stub_data == nullptr) == (code_buffer == nullptr), + "stub data and code buffer must both be null or both non null"); + + // If this is a stub and the cache is on for either load or dump we + // need to insert the stub entries into the AOTCacheAddressTable so + // that relocs which refer to entries defined by this blob get + // translated correctly. + // + // Entry insertion needs to be be done up front before writing the + // blob because some blobs rely on internal daisy-chain references + // from one entry to another. + // + // Entry insertion also needs to be done even if the cache is open + // for use but not for dump. This may be needed when an archived + // blob omits some entries -- either because of a config change or a + // load failure -- with the result that the entries end up being + // generated. These generated entry addresses may be needed to + // resolve references from subsequently loaded blobs (for either + // stubs or nmethods). 
+ + if (is_on() && AOTCodeEntry::is_blob(entry_kind)) { + publish_stub_addresses(blob, (BlobId)id, stub_data); + } + AOTCodeCache* cache = open_for_dump(); if (cache == nullptr) { return false; } - assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind); - if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) { return false; } if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) { return false; } + // we do not currently store C2 stubs because we are seeing weird + // memory errors when loading them -- see JDK-8357593 + if (entry_kind == AOTCodeEntry::C2Blob) { + return false; + } log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]); #ifdef ASSERT @@ -921,8 +1031,44 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind } CodeBlob::archive_blob(&blob, archive_buffer); - uint reloc_data_size = blob.relocation_size(); - n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size); + // For a relocatable code blob its relocations are linked from the + // blob. However, for a non-relocatable (stubgen) blob we only have + // transient relocations attached to the code buffer that are added + // in order to support AOT-load time patching. in either case, we + // need to explicitly save these relocs when storing the blob to the + // archive so we can then reload them and reattach them to either + // the blob or to a code buffer when we reload the blob into a + // production JVM. + // + // Either way we are then in a position to iterate over the relocs + // and AOT patch the ones that refer to code that may move between + // assembly and production time. We also need to save and restore + // AOT address table indexes for the target addresses of affected + // relocs. That happens below. 
+ + int reloc_count; + address reloc_data; + if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) { + CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS); + reloc_count = (cs->has_locs() ? cs->locs_count() : 0); + reloc_data = (reloc_count > 0 ? (address)cs->locs_start() : nullptr); + } else { + reloc_count = blob.relocation_size() / sizeof(relocInfo); + reloc_data = (address)blob.relocation_begin(); + } + n = cache->write_bytes(&reloc_count, sizeof(int)); + if (n != sizeof(int)) { + return false; + } + if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) { + // align to heap word size before writing the relocs so we can + // install them into a code buffer when they get restored + if (!cache->align_write()) { + return false; + } + } + uint reloc_data_size = (uint)(reloc_count * sizeof(relocInfo)); + n = cache->write_bytes(reloc_data, reloc_data_size); if (n != reloc_data_size) { return false; } @@ -935,8 +1081,40 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind has_oop_maps = true; } + // In the case of a multi-stub blob we need to write start, end, + // secondary entries and extras. For any other blob entry addresses + // beyond the blob start will be stored in the blob as offsets. + if (stub_data != nullptr) { + if (!cache->write_stub_data(blob, stub_data)) { + return false; + } + } + + // now we have added all the other data we can write details of any + // extra the AOT relocations + + bool write_ok = true; + if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) { + if (reloc_count > 0) { + CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS); + RelocIterator iter(cs); + write_ok = cache->write_relocations(blob, iter); + } + } else { + RelocIterator iter(&blob); + write_ok = cache->write_relocations(blob, iter); + } + + if (!write_ok) { + if (!cache->failed()) { + // We may miss an address in AOT table - skip this code blob. 
+ cache->set_write_position(entry_position); + } + return false; + } + #ifndef PRODUCT - // Write asm remarks + // Write asm remarks after relocation info if (!cache->write_asm_remarks(blob)) { return false; } @@ -945,15 +1123,8 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind } #endif /* PRODUCT */ - if (!cache->write_relocations(blob)) { - if (!cache->failed()) { - // We may miss an address in AOT table - skip this code blob. - cache->set_write_position(entry_position); - } - return false; - } - uint entry_size = cache->_write_position - entry_position; + AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id), entry_position, entry_size, name_offset, name_size, blob_offset, has_oop_maps, blob.content_begin()); @@ -961,25 +1132,141 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind return true; } -bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) { - assert(AOTCodeEntry::is_blob(entry_kind), - "wrong entry kind for blob id %s", StubInfo::name(id)); - return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id)); +bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) { + assert(!AOTCodeEntry::is_blob(entry_kind), + "wrong entry kind for numeric id %d", id); + return store_code_blob(blob, entry_kind, (uint)id, name, nullptr, nullptr); } -CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) { +bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) { + assert(AOTCodeEntry::is_single_stub_blob(entry_kind), + "wrong entry kind for blob id %s", StubInfo::name(id)); + return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), nullptr, nullptr); +} + +bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data, CodeBuffer* 
code_buffer) { + assert(AOTCodeEntry::is_multi_stub_blob(entry_kind), + "wrong entry kind for multi stub blob id %s", StubInfo::name(id)); + return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), stub_data, code_buffer); +} + +bool AOTCodeCache::write_stub_data(CodeBlob &blob, AOTStubData *stub_data) { + BlobId blob_id = stub_data->blob_id(); + StubId stub_id = StubInfo::stub_base(blob_id); + address blob_base = blob.code_begin(); + int stub_cnt = StubInfo::stub_count(blob_id); + int n; + + LogStreamHandle(Trace, aot, codecache, stubs) log; + + if (log.is_enabled()) { + log.print_cr("======== Stub data starts at offset %d", _write_position); + } + + for (int i = 0; i < stub_cnt; i++, stub_id = StubInfo::next_in_blob(blob_id, stub_id)) { + // for each stub we find in the ranges list we write an int + // sequence where + // + // - start_pos is the stub start address encoded as a code section offset + // + // - end is the stub end address encoded as an offset from start + // + // - N counts the number of stub-local entries/extras + // + // - offseti is a stub-local entry/extra address encoded as len for + // a null address otherwise as an offset in range [1,len-1] + + StubAddrRange& range = stub_data->get_range(i); + GrowableArray
& addresses = stub_data->address_array(); + int base = range.start_index(); + if (base >= 0) { + n = write_bytes(&stub_id, sizeof(StubId)); + if (n != sizeof(StubId)) { + return false; + } + address start = addresses.at(base); + assert (blob_base <= start, "sanity"); + uint offset = (uint)(start - blob_base); + n = write_bytes(&offset, sizeof(uint)); + if (n != sizeof(int)) { + return false; + } + address end = addresses.at(base + 1); + assert (start < end, "sanity"); + offset = (uint)(end - start); + n = write_bytes(&offset, sizeof(uint)); + if (n != sizeof(int)) { + return false; + } + // write number of secondary and extra entries + int count = range.count() - 2; + n = write_bytes(&count, sizeof(int)); + if (n != sizeof(int)) { + return false; + } + for (int j = 0; j < count; j++) { + address next = addresses.at(base + 2 + j); + if (next != nullptr) { + // n.b. This maps next == end to the stub length which + // means we will reconstitute the address as nullptr. That + // happens when we have a handler range covers the end of + // a stub and needs to be handled specially by the client + // that restores the extras. 
+ assert(start <= next && next <= end, "sanity"); + offset = (uint)(next - start); + } else { + // this can happen when a stub is not generated or an + // extra is the common handler target + offset = NULL_ADDRESS_MARKER; + } + n = write_bytes(&offset, sizeof(uint)); + if (n != sizeof(int)) { + return false; + } + } + if (log.is_enabled()) { + log.print_cr("======== wrote stub %s and %d addresses up to offset %d", + StubInfo::name(stub_id), range.count(), _write_position); + } + } + } + // we should have exhausted all stub ids in the blob + assert(stub_id == StubId::NO_STUBID, "sanity"); + // write NO_STUBID as an end marker + n = write_bytes(&stub_id, sizeof(StubId)); + if (n != sizeof(StubId)) { + return false; + } + + if (log.is_enabled()) { + log.print_cr("======== Stub data ends at offset %d", _write_position); + } + + return true; +} + +CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data) { AOTCodeCache* cache = open_for_use(); if (cache == nullptr) { return nullptr; } assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind); + assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr), + "entry_kind %d does not match stub_data pointer %p", + entry_kind, stub_data); + if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) { return nullptr; } if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) { return nullptr; } + // we do not currently load C2 stubs because we are seeing weird + // memory errors when loading them -- see JDK-8357593 + if (entry_kind == AOTCodeEntry::C2Blob) { + return nullptr; + } log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]); AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id)); @@ -987,20 +1274,32 @@ CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, c return 
nullptr; } AOTCodeReader reader(cache, entry); - CodeBlob* blob = reader.compile_code_blob(name); + CodeBlob* blob = reader.compile_code_blob(name, entry_kind, id, stub_data); log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache", (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]); return blob; } -CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) { - assert(AOTCodeEntry::is_blob(entry_kind), - "wrong entry kind for blob id %s", StubInfo::name(id)); - return load_code_blob(entry_kind, (uint)id, StubInfo::name(id)); +CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) { + assert(!AOTCodeEntry::is_blob(entry_kind), + "wrong entry kind for numeric id %d", id); + return load_code_blob(entry_kind, (uint)id, name, nullptr); } -CodeBlob* AOTCodeReader::compile_code_blob(const char* name) { +CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) { + assert(AOTCodeEntry::is_single_stub_blob(entry_kind), + "wrong entry kind for blob id %s", StubInfo::name(id)); + return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), nullptr); +} + +CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data) { + assert(AOTCodeEntry::is_multi_stub_blob(entry_kind), + "wrong entry kind for blob id %s", StubInfo::name(id)); + return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), stub_data); +} + +CodeBlob* AOTCodeReader::compile_code_blob(const char* name, AOTCodeEntry::Kind entry_kind, int id, AOTStubData* stub_data) { uint entry_position = _entry->offset(); // Read name @@ -1014,39 +1313,40 @@ CodeBlob* AOTCodeReader::compile_code_blob(const char* name) { set_lookup_failed(); // Skip this blob return nullptr; } + _name = stored_name; - // Read archived code blob + // Read archived code blob and related info uint offset = entry_position + _entry->blob_offset(); 
CodeBlob* archived_blob = (CodeBlob*)addr(offset); offset += archived_blob->size(); - address reloc_data = (address)addr(offset); - offset += archived_blob->relocation_size(); + _reloc_count = *(int*)addr(offset); offset += sizeof(int); + if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) { + // position of relocs will have been aligned to heap word size so + // we can install them into a code buffer + offset = align_up(offset, DATA_ALIGNMENT); + } + _reloc_data = (address)addr(offset); + offset += _reloc_count * sizeof(relocInfo); set_read_position(offset); - ImmutableOopMapSet* oop_maps = nullptr; if (_entry->has_oop_maps()) { - oop_maps = read_oop_map_set(); + _oop_maps = read_oop_map_set(); } - CodeBlob* code_blob = CodeBlob::create(archived_blob, - stored_name, - reloc_data, - oop_maps - ); + // record current context for use by that callback + _stub_data = stub_data; + _entry_kind = entry_kind; + _id = id; + + // CodeBlob::restore() calls AOTCodeReader::restore() + + CodeBlob* code_blob = CodeBlob::create(archived_blob, this); + if (code_blob == nullptr) { // no space left in CodeCache return nullptr; } -#ifndef PRODUCT - code_blob->asm_remarks().init(); - read_asm_remarks(code_blob->asm_remarks()); - code_blob->dbg_strings().init(); - read_dbg_strings(code_blob->dbg_strings()); -#endif // PRODUCT - - fix_relocations(code_blob); - #ifdef ASSERT LogStreamHandle(Trace, aot, codecache, stubs) log; if (log.is_enabled()) { @@ -1057,15 +1357,221 @@ CodeBlob* AOTCodeReader::compile_code_blob(const char* name) { return code_blob; } +void AOTCodeReader::restore(CodeBlob* code_blob) { + precond(AOTCodeCache::is_on_for_use()); + precond(_name != nullptr); + precond(_reloc_data != nullptr); + + code_blob->set_name(_name); + // Saved relocations need restoring except for the case of a + // multi-stub blob which has no runtime relocations. However, we may + // still have saved some (re-)load time relocs that were attached to + // the generator's code buffer. 
We don't attach them to the blob but + // they get processed below by fix_relocations. + if (!AOTCodeEntry::is_multi_stub_blob(_entry_kind)) { + code_blob->restore_mutable_data(_reloc_data); + } + code_blob->set_oop_maps(_oop_maps); + + // if this is a multi stub blob load its entries + if (AOTCodeEntry::is_blob(_entry_kind)) { + BlobId blob_id = static_cast(_id); + if (StubInfo::is_stubgen(blob_id)) { + assert(_stub_data != nullptr, "sanity"); + read_stub_data(code_blob, _stub_data); + } + // publish entries found either in stub_data or as offsets in blob + AOTCodeCache::publish_stub_addresses(*code_blob, blob_id, _stub_data); + } + + // Now that all the entry points are in the address table we can + // read all the extra reloc info and fix up any addresses that need + // patching to adjust for a new location in a new JVM. We can be + // sure to correctly update all runtime references, including + // cross-linked stubs that are internally daisy-chained. If + // relocation fails and we have to re-generate any of the stubs then + // the entry points for newly generated stubs will get updated, + // ensuring that any other stubs or nmethods we need to relocate + // will use the correct address. + + // if we have a relocatable code blob then the relocs are already + // attached to the blob and we can iterate over it to find the ones + // we need to patch. With a non-relocatable code blob we need to + // wrap it with a CodeBuffer and then reattach the relocs to the + // code buffer. 
+ + if (AOTCodeEntry::is_multi_stub_blob(_entry_kind)) { + // the blob doesn't have any proper runtime relocs but we can + // reinstate the AOT-load time relocs we saved from the code + // buffer that generated this blob in a new code buffer and use + // the latter to iterate over them + if (_reloc_count > 0) { + CodeBuffer code_buffer(code_blob); + relocInfo* locs = (relocInfo*)_reloc_data; + code_buffer.insts()->initialize_shared_locs(locs, _reloc_count); + code_buffer.insts()->set_locs_end(locs + _reloc_count); + CodeSection *cs = code_buffer.code_section(CodeBuffer::SECT_INSTS); + RelocIterator reloc_iter(cs); + fix_relocations(code_blob, reloc_iter); + } + } else { + // the AOT-load time relocs will be in the blob's restored relocs + RelocIterator reloc_iter(code_blob); + fix_relocations(code_blob, reloc_iter); + } + +#ifndef PRODUCT + code_blob->asm_remarks().init(); + read_asm_remarks(code_blob->asm_remarks()); + code_blob->dbg_strings().init(); + read_dbg_strings(code_blob->dbg_strings()); +#endif // PRODUCT +} + +void AOTCodeReader::read_stub_data(CodeBlob* code_blob, AOTStubData* stub_data) { + GrowableArray
& addresses = stub_data->address_array(); + // Read the list of stub ids and associated start, end, secondary + // and extra addresses and install them in the stub data. + // + // Also insert all start and secondary addresses into the AOTCache + // address table so we correctly relocate this blob and any followng + // blobs/nmethods. + // + // n.b. if an error occurs and we need to regenerate any of these + // stubs the address table will be updated as a side-effect of + // regeneration. + + address blob_base = code_blob->code_begin(); + uint blob_size = (uint)(code_blob->code_end() - blob_base); + int offset = read_position(); + LogStreamHandle(Trace, aot, codecache, stubs) log; + if (log.is_enabled()) { + log.print_cr("======== Stub data starts at offset %d", offset); + } + // read stub and entries until we see NO_STUBID + StubId stub_id = *(StubId*)addr(offset); offset += sizeof(StubId); + // we ought to have at least one saved stub in the blob + assert(stub_id != StubId::NO_STUBID, "blob %s contains no stubs!", StubInfo::name(stub_data->blob_id())); + while (stub_id != StubId::NO_STUBID) { + assert(StubInfo::blob(stub_id) == stub_data->blob_id(), "sanity"); + int idx = StubInfo::stubgen_offset_in_blob(stub_data->blob_id(), stub_id); + StubAddrRange& range = stub_data->get_range(idx); + // we should only see a stub once + assert(range.start_index() < 0, "repeated entry for stub %s", StubInfo::name(stub_id)); + int address_base = addresses.length(); + // start is an offset from the blob base + uint start = *(uint*)addr(offset); offset += sizeof(uint); + assert(start < blob_size, "stub %s start offset %d exceeds buffer length %d", StubInfo::name(stub_id), start, blob_size); + address stub_start = blob_base + start; + addresses.append(stub_start); + // end is an offset from the stub start + uint end = *(uint*)addr(offset); offset += sizeof(uint); + assert(start + end <= blob_size, "stub %s end offset %d exceeds remaining buffer length %d", StubInfo::name(stub_id), 
end, blob_size - start); + addresses.append(stub_start + end); + // read count of secondary entries plus extras + int entries_count = *(int*)addr(offset); offset += sizeof(int); + assert(entries_count >= (StubInfo::entry_count(stub_id) - 1), "not enough entries for %s", StubInfo::name(stub_id)); + for (int i = 0; i < entries_count; i++) { + // entry offset is an offset from the stub start less than or + // equal to end + uint entry = *(uint*)addr(offset); offset += sizeof(uint); + if (entry <= end) { + // entry addresses may not address end but extras can + assert(entry < end || i >= StubInfo::entry_count(stub_id), + "entry offset 0x%x exceeds stub length 0x%x for stub %s", + entry, end, StubInfo::name(stub_id)); + addresses.append(stub_start + entry); + } else { + // special case: entry encodes a nullptr + assert(entry == AOTCodeCache::NULL_ADDRESS_MARKER, "stub %s entry offset %d lies beyond stub end %d and does not equal NULL_ADDRESS_MARKER", StubInfo::name(stub_id), entry, end); + addresses.append(nullptr); + } + } + if (log.is_enabled()) { + log.print_cr("======== read stub %s and %d addresses up to offset %d", + StubInfo::name(stub_id), 2 + entries_count, offset); + } + range.init_entry(address_base, 2 + entries_count); + // move on to next stub or NO_STUBID + stub_id = *(StubId*)addr(offset); offset += sizeof(StubId); + } + if (log.is_enabled()) { + log.print_cr("======== Stub data ends at offset %d", offset); + } + + set_read_position(offset); +} + +void AOTCodeCache::publish_external_addresses(GrowableArray
& addresses) { + DEBUG_ONLY( _passed_init2 = true; ) + if (opened_cache == nullptr) { + return; + } + + cache()->_table->add_external_addresses(addresses); +} + +void AOTCodeCache::publish_stub_addresses(CodeBlob &code_blob, BlobId blob_id, AOTStubData *stub_data) { + if (stub_data != nullptr) { + // register all entries in stub + assert(StubInfo::stub_count(blob_id) > 1, + "multiple stub data provided for single stub blob %s", + StubInfo::name(blob_id)); + assert(blob_id == stub_data->blob_id(), + "blob id %s does not match id in stub data %s", + StubInfo::name(blob_id), + StubInfo::name(stub_data->blob_id())); + // iterate over all stubs in the blob + StubId stub_id = StubInfo::stub_base(blob_id); + int stub_cnt = StubInfo::stub_count(blob_id); + GrowableArray
& addresses = stub_data->address_array(); + for (int i = 0; i < stub_cnt; i++) { + assert(stub_id != StubId::NO_STUBID, "sanity"); + StubAddrRange& range = stub_data->get_range(i); + int base = range.start_index(); + if (base >= 0) { + cache()->add_stub_entries(stub_id, addresses.at(base), &addresses, base + 2); + } + stub_id = StubInfo::next_in_blob(blob_id, stub_id); + } + // we should have exhausted all stub ids in the blob + assert(stub_id == StubId::NO_STUBID, "sanity"); + } else { + // register entry or entries for a single stub blob + StubId stub_id = StubInfo::stub_base(blob_id); + assert(StubInfo::stub_count(blob_id) == 1, + "multiple stub blob %s provided without stub data", + StubInfo::name(blob_id)); + address start = code_blob.code_begin(); + if (StubInfo::entry_count(stub_id) == 1) { + assert(!code_blob.is_deoptimization_stub(), "expecting multiple entries for stub %s", StubInfo::name(stub_id)); + // register the blob base address as the only entry + cache()->add_stub_entries(stub_id, start); + } else { + assert(code_blob.is_deoptimization_stub(), "only expecting one entry for stub %s", StubInfo::name(stub_id)); + DeoptimizationBlob *deopt_blob = code_blob.as_deoptimization_blob(); + assert(deopt_blob->unpack() == start, "unexpected offset 0x%x for deopt stub entry", (int)(deopt_blob->unpack() - start)); + GrowableArray
addresses; + addresses.append(deopt_blob->unpack_with_exception()); + addresses.append(deopt_blob->unpack_with_reexecution()); + addresses.append(deopt_blob->unpack_with_exception_in_tls()); +#if INCLUDE_JVMCI + addresses.append(deopt_blob->uncommon_trap()); + addresses.append(deopt_blob->implicit_exception_uncommon_trap()); +#endif // INCLUDE_JVMCI + cache()->add_stub_entries(stub_id, start, &addresses, 0); + } + } +} + // ------------ process code and data -------------- // Can't use -1. It is valid value for jump to iteself destination // used by static call stub: see NativeJump::jump_destination(). #define BAD_ADDRESS_ID -2 -bool AOTCodeCache::write_relocations(CodeBlob& code_blob) { +bool AOTCodeCache::write_relocations(CodeBlob& code_blob, RelocIterator& iter) { GrowableArray reloc_data; - RelocIterator iter(&code_blob); LogStreamHandle(Trace, aot, codecache, reloc) log; while (iter.next()) { int idx = reloc_data.append(0); // default value @@ -1119,6 +1625,11 @@ bool AOTCodeCache::write_relocations(CodeBlob& code_blob) { // Write the count first int count = reloc_data.length(); write_bytes(&count, sizeof(int)); + if (log.is_enabled()) { + log.print_cr("======== extra relocations count=%d", count); + log.print( " {"); + } + bool first = true; for (GrowableArrayIterator iter = reloc_data.begin(); iter != reloc_data.end(); ++iter) { uint value = *iter; @@ -1126,23 +1637,43 @@ bool AOTCodeCache::write_relocations(CodeBlob& code_blob) { if (n != sizeof(uint)) { return false; } + if (log.is_enabled()) { + if (first) { + first = false; + log.print("%d", value); + } else { + log.print(", %d", value); + } + } + } + if (log.is_enabled()) { + log.print_cr("}"); } return true; } -void AOTCodeReader::fix_relocations(CodeBlob* code_blob) { - LogStreamHandle(Trace, aot, reloc) log; +void AOTCodeReader::fix_relocations(CodeBlob *code_blob, RelocIterator& iter) { uint offset = read_position(); - int count = *(int*)addr(offset); + int reloc_count = *(int*)addr(offset); offset 
+= sizeof(int); - if (log.is_enabled()) { - log.print_cr("======== extra relocations count=%d", count); - } uint* reloc_data = (uint*)addr(offset); - offset += (count * sizeof(uint)); + offset += (reloc_count * sizeof(uint)); set_read_position(offset); - RelocIterator iter(code_blob); + LogStreamHandle(Trace, aot, codecache, reloc) log; + if (log.is_enabled()) { + log.print_cr("======== extra relocations count=%d", reloc_count); + log.print(" {"); + for(int i = 0; i < reloc_count; i++) { + if (i == 0) { + log.print("%d", reloc_data[i]); + } else { + log.print(", %d", reloc_data[i]); + } + } + log.print_cr("}"); + } + int j = 0; while (iter.next()) { switch (iter.type()) { @@ -1191,7 +1722,7 @@ void AOTCodeReader::fix_relocations(CodeBlob* code_blob) { } j++; } - assert(j == count, "sanity"); + assert(j == reloc_count, "sanity"); } bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) { @@ -1301,266 +1832,359 @@ void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) { //======================= AOTCodeAddressTable =============== -// address table ids for generated routines, external addresses and C -// string addresses are partitioned into positive integer ranges -// defined by the following positive base and max values -// i.e. [_extrs_base, _extrs_base + _extrs_max -1], -// [_blobs_base, _blobs_base + _blobs_max -1], -// ... -// [_c_str_base, _c_str_base + _c_str_max -1], +// address table ids for generated routine entry adresses, external +// addresses and C string addresses are partitioned into positive +// integer ranges defined by the following positive base and max +// values i.e. 
[_extrs_base, _extrs_base + _extrs_max -1], +// [_stubs_base, _stubs_base + _stubs_max -1], [_c_str_base, +// _c_str_base + _c_str_max -1], -#define _extrs_max 100 -#define _stubs_max 3 - -#define _shared_blobs_max 20 -#define _C1_blobs_max 10 -#define _blobs_max (_shared_blobs_max+_C1_blobs_max) -#define _all_max (_extrs_max+_stubs_max+_blobs_max) +#define _extrs_max 380 +#define _stubs_max static_cast(EntryId::NUM_ENTRYIDS) #define _extrs_base 0 #define _stubs_base (_extrs_base + _extrs_max) -#define _shared_blobs_base (_stubs_base + _stubs_max) -#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max) -#define _blobs_end (_shared_blobs_base + _blobs_max) +#define _all_max (_stubs_base + _stubs_max) -#define SET_ADDRESS(type, addr) \ - { \ - type##_addr[type##_length++] = (address) (addr); \ - assert(type##_length <= type##_max, "increase size"); \ +// setter for external addresses and string addresses inserts new +// addresses in the order they are encountered them which must remain +// the same across an assembly run and subsequent production run + +#define ADD_EXTERNAL_ADDRESS(addr) \ + { \ + hash_address((address) addr, _extrs_base + _extrs_length); \ + _extrs_addr[_extrs_length++] = (address) (addr); \ + assert(_extrs_length <= _extrs_max, "increase size"); \ } +// insert into to the address hash table the index of an external +// address or a stub address in the list of external or stub +// addresses, respectively, keyed by the relevant address + +void AOTCodeAddressTable::hash_address(address addr, int idx) { + // only do this if we have a non-null address to record and the + // cache is open for dumping + if (addr == nullptr) { + return; + } + // check opened_cache because this can be called before the cache is + // properly initialized and only continue when dumping is enabled + if (opened_cache != nullptr && opened_cache->for_dump()) { + if (_hash_table == nullptr) { + _hash_table = new (mtCode) AOTCodeAddressHashTable(); + } + 
assert(_hash_table->get(addr) == nullptr, "repeated insert of address " INTPTR_FORMAT, p2i(addr)); + _hash_table->put(addr, idx); + log_trace(aot, codecache)("Address " INTPTR_FORMAT " inserted into AOT Code Cache address hash table with index '%d'", + p2i(addr), idx); + } +} + static bool initializing_extrs = false; void AOTCodeAddressTable::init_extrs() { if (_extrs_complete || initializing_extrs) return; // Done already - assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting"); - initializing_extrs = true; _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode); _extrs_length = 0; + { + // Required by initial stubs + ADD_EXTERNAL_ADDRESS(SharedRuntime::exception_handler_for_return_address); // used by forward_exception + ADD_EXTERNAL_ADDRESS(CompressedOops::base_addr()); // used by call_stub + ADD_EXTERNAL_ADDRESS(Thread::current); // used by call_stub + ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_StackOverflowError); + ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError); + } + // Record addresses of VM runtime methods - SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite); - SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method); - SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract); - SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss); + ADD_EXTERNAL_ADDRESS(SharedRuntime::fixup_callers_callsite); + ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method); + ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_abstract); + ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_ic_miss); #if defined(AARCH64) && !defined(ZERO) - SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper); + ADD_EXTERNAL_ADDRESS(JavaThread::aarch64_get_thread_helper); + ADD_EXTERNAL_ADDRESS(BarrierSetAssembler::patching_epoch_addr()); #endif + +#ifndef PRODUCT + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jbyte_array_copy_ctr); // used by arraycopy stub on arm32 and x86_64 + 
ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jshort_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jint_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jlong_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_oop_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_checkcast_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_generic_array_copy_ctr); // used by arraycopy stub + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_set_memory_ctr); // used by arraycopy stub +#endif /* PRODUCT */ + + ADD_EXTERNAL_ADDRESS(SharedRuntime::enable_stack_reserved_zone); + +#if defined(AMD64) && !defined(ZERO) + ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_multiply); + ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_square); +#endif // defined(AMD64) && !defined(ZERO) + + ADD_EXTERNAL_ADDRESS(SharedRuntime::d2f); + ADD_EXTERNAL_ADDRESS(SharedRuntime::d2i); + ADD_EXTERNAL_ADDRESS(SharedRuntime::d2l); + ADD_EXTERNAL_ADDRESS(SharedRuntime::dcos); + ADD_EXTERNAL_ADDRESS(SharedRuntime::dexp); + ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog); + ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog10); + ADD_EXTERNAL_ADDRESS(SharedRuntime::dpow); +#ifndef ZERO + ADD_EXTERNAL_ADDRESS(SharedRuntime::drem); +#endif + ADD_EXTERNAL_ADDRESS(SharedRuntime::dsin); + ADD_EXTERNAL_ADDRESS(SharedRuntime::dtan); + ADD_EXTERNAL_ADDRESS(SharedRuntime::f2i); + ADD_EXTERNAL_ADDRESS(SharedRuntime::f2l); +#ifndef ZERO + ADD_EXTERNAL_ADDRESS(SharedRuntime::frem); +#endif + ADD_EXTERNAL_ADDRESS(SharedRuntime::l2d); + ADD_EXTERNAL_ADDRESS(SharedRuntime::l2f); + ADD_EXTERNAL_ADDRESS(SharedRuntime::ldiv); + ADD_EXTERNAL_ADDRESS(SharedRuntime::lmul); + ADD_EXTERNAL_ADDRESS(SharedRuntime::lrem); + +#if INCLUDE_JVMTI + ADD_EXTERNAL_ADDRESS(&JvmtiExport::_should_notify_object_alloc); 
+#endif /* INCLUDE_JVMTI */ + + ADD_EXTERNAL_ADDRESS(ThreadIdentifier::unsafe_offset()); + // already added + // ADD_EXTERNAL_ADDRESS(Thread::current); + + ADD_EXTERNAL_ADDRESS(os::javaTimeMillis); + ADD_EXTERNAL_ADDRESS(os::javaTimeNanos); +#ifndef PRODUCT + ADD_EXTERNAL_ADDRESS(os::breakpoint); +#endif + + ADD_EXTERNAL_ADDRESS(StubRoutines::crc_table_addr()); +#ifndef PRODUCT + ADD_EXTERNAL_ADDRESS(&SharedRuntime::_partial_subtype_ctr); +#endif + +#if INCLUDE_JFR + ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::write_checkpoint); + ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::return_lease); +#endif + + ADD_EXTERNAL_ADDRESS(UpcallLinker::handle_uncaught_exception); // used by upcall_stub_exception_handler + { // Required by Shared blobs - SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info); - SET_ADDRESS(_extrs, Deoptimization::unpack_frames); - SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception); - SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C); - SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C); - SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C); - SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError); - SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError); - SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError); - SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError); - SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call); + ADD_EXTERNAL_ADDRESS(Deoptimization::fetch_unroll_info); + ADD_EXTERNAL_ADDRESS(Deoptimization::unpack_frames); + ADD_EXTERNAL_ADDRESS(SafepointSynchronize::handle_polling_page_exception); + ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_opt_virtual_call_C); + ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_virtual_call_C); + ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_static_call_C); + // already added + // ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError); + 
ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_AbstractMethodError); + ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_IncompatibleClassChangeError); + ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_NullPointerException_at_call); } #ifdef COMPILER1 { // Required by C1 blobs - SET_ADDRESS(_extrs, static_cast(SharedRuntime::dtrace_object_alloc)); - SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address); - SET_ADDRESS(_extrs, SharedRuntime::register_finalizer); - SET_ADDRESS(_extrs, Runtime1::is_instance_of); - SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc); - SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception); - SET_ADDRESS(_extrs, Runtime1::new_instance); - SET_ADDRESS(_extrs, Runtime1::counter_overflow); - SET_ADDRESS(_extrs, Runtime1::new_type_array); - SET_ADDRESS(_extrs, Runtime1::new_object_array); - SET_ADDRESS(_extrs, Runtime1::new_multi_array); - SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception); - SET_ADDRESS(_extrs, Runtime1::throw_index_exception); - SET_ADDRESS(_extrs, Runtime1::throw_div0_exception); - SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception); - SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception); - SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception); - SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error); - SET_ADDRESS(_extrs, Runtime1::is_instance_of); - SET_ADDRESS(_extrs, Runtime1::monitorenter); - SET_ADDRESS(_extrs, Runtime1::monitorexit); - SET_ADDRESS(_extrs, Runtime1::deoptimize); - SET_ADDRESS(_extrs, Runtime1::access_field_patching); - SET_ADDRESS(_extrs, Runtime1::move_klass_patching); - SET_ADDRESS(_extrs, Runtime1::move_mirror_patching); - SET_ADDRESS(_extrs, Runtime1::move_appendix_patching); - SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap); - SET_ADDRESS(_extrs, Runtime1::unimplemented_entry); - SET_ADDRESS(_extrs, Thread::current); - SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr()); -#ifndef PRODUCT - SET_ADDRESS(_extrs, os::breakpoint); -#endif + 
ADD_EXTERNAL_ADDRESS(static_cast(SharedRuntime::dtrace_object_alloc)); + ADD_EXTERNAL_ADDRESS(SharedRuntime::register_finalizer); + ADD_EXTERNAL_ADDRESS(Runtime1::is_instance_of); + ADD_EXTERNAL_ADDRESS(Runtime1::exception_handler_for_pc); + ADD_EXTERNAL_ADDRESS(Runtime1::check_abort_on_vm_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::new_instance); + ADD_EXTERNAL_ADDRESS(Runtime1::counter_overflow); + ADD_EXTERNAL_ADDRESS(Runtime1::new_type_array); + ADD_EXTERNAL_ADDRESS(Runtime1::new_object_array); + ADD_EXTERNAL_ADDRESS(Runtime1::new_multi_array); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_range_check_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_index_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_div0_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_null_pointer_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_array_store_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_class_cast_exception); + ADD_EXTERNAL_ADDRESS(Runtime1::throw_incompatible_class_change_error); + ADD_EXTERNAL_ADDRESS(Runtime1::monitorenter); + ADD_EXTERNAL_ADDRESS(Runtime1::monitorexit); + ADD_EXTERNAL_ADDRESS(Runtime1::deoptimize); + ADD_EXTERNAL_ADDRESS(Runtime1::access_field_patching); + ADD_EXTERNAL_ADDRESS(Runtime1::move_klass_patching); + ADD_EXTERNAL_ADDRESS(Runtime1::move_mirror_patching); + ADD_EXTERNAL_ADDRESS(Runtime1::move_appendix_patching); + ADD_EXTERNAL_ADDRESS(Runtime1::predicate_failed_trap); + ADD_EXTERNAL_ADDRESS(Runtime1::unimplemented_entry); + // already added + // ADD_EXTERNAL_ADDRESS(Thread::current); + ADD_EXTERNAL_ADDRESS(CompressedKlassPointers::base_addr()); } #endif #ifdef COMPILER2 { // Required by C2 blobs - SET_ADDRESS(_extrs, Deoptimization::uncommon_trap); - SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C); - SET_ADDRESS(_extrs, OptoRuntime::new_instance_C); - SET_ADDRESS(_extrs, OptoRuntime::new_array_C); - SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C); - SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C); - SET_ADDRESS(_extrs, 
OptoRuntime::multianewarray3_C); - SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C); - SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C); - SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C); - SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C); - SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C); - SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C); - SET_ADDRESS(_extrs, OptoRuntime::rethrow_C); - SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C); - SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C); - SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C); - SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C); - SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C); - SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C); -#if defined(AARCH64) - SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure); -#endif // AARCH64 + ADD_EXTERNAL_ADDRESS(Deoptimization::uncommon_trap); + ADD_EXTERNAL_ADDRESS(OptoRuntime::handle_exception_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::new_instance_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_nozero_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray2_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray3_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray4_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray5_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarrayN_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::complete_monitor_locking_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notify_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notifyAll_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::rethrow_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::slow_arraycopy_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::register_finalizer_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_first_transition_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_final_transition_C); + 
ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_transition_C); + ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_transition_C); + // already added for +#if defined(AARCH64) && ! defined(PRODUCT) + ADD_EXTERNAL_ADDRESS(JavaThread::verify_cross_modify_fence_failure); +#endif // AARCH64 && !PRODUCT } #endif // COMPILER2 #if INCLUDE_G1GC - SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry); + ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_field_pre_entry); + ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry); // used by arraycopy stubs + ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_oop_entry); // used by arraycopy stubs + ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_post_entry); // used by arraycopy stubs + ADD_EXTERNAL_ADDRESS(BarrierSetNMethod::nmethod_stub_entry_barrier); // used by method_entry_barrier + #endif #if INCLUDE_SHENANDOAHGC - SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre); - SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom); - SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::write_barrier_pre); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong_narrow); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak_narrow); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom_narrow); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_oop); + ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_narrow_oop); #endif #if INCLUDE_ZGC - SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr()); - SET_ADDRESS(_extrs, 
ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()); + ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_array_addr()); + + ADD_EXTERNAL_ADDRESS(ZPointerVectorLoadBadMask); + ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreBadMask); + ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreGoodMask); #if defined(AMD64) - SET_ADDRESS(_extrs, &ZPointerLoadShift); + ADD_EXTERNAL_ADDRESS(&ZPointerLoadShift); + ADD_EXTERNAL_ADDRESS(&ZPointerLoadShiftTable); #endif #endif #ifndef ZERO #if defined(AMD64) || defined(AARCH64) || defined(RISCV64) - SET_ADDRESS(_extrs, MacroAssembler::debug64); -#endif + ADD_EXTERNAL_ADDRESS(MacroAssembler::debug64); +#endif // defined(AMD64) || defined(AARCH64) || defined(RISCV64) +#if defined(AMD64) + ADD_EXTERNAL_ADDRESS(warning); +#endif // defined(AMD64) #endif // ZERO // addresses of fields in AOT runtime constants area address* p = AOTRuntimeConstants::field_addresses_list(); while (*p != nullptr) { - SET_ADDRESS(_extrs, *p++); + address to_add = 
(address)*p++; + ADD_EXTERNAL_ADDRESS(to_add); } - _extrs_complete = true; - log_debug(aot, codecache, init)("External addresses recorded"); + log_debug(aot, codecache, init)("External addresses opened and recorded"); + // allocate storage for stub entries + _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode); + log_debug(aot, codecache, init)("Stub addresses opened"); } -static bool initializing_early_stubs = false; - -void AOTCodeAddressTable::init_early_stubs() { - if (_complete || initializing_early_stubs) return; // Done already - initializing_early_stubs = true; - _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode); - _stubs_length = 0; - SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry()); +void AOTCodeAddressTable::init_extrs2() { + assert(initializing_extrs && !_extrs_complete, + "invalid sequence for init_extrs2"); { - // Required by C1 blobs -#if defined(AMD64) && !defined(ZERO) - SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip()); - SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup()); -#endif // AMD64 + ADD_EXTERNAL_ADDRESS(Continuation::prepare_thaw); // used by cont_thaw + ADD_EXTERNAL_ADDRESS(Continuation::thaw_entry()); // used by cont_thaw + ADD_EXTERNAL_ADDRESS(ContinuationEntry::thaw_call_pc_address()); // used by cont_preempt_stub } - - _early_stubs_complete = true; - log_info(aot, codecache, init)("Early stubs recorded"); + _extrs_complete = true; + initializing_extrs = false; + log_debug(aot, codecache, init)("External addresses recorded and closed"); } -static bool initializing_shared_blobs = false; - -void AOTCodeAddressTable::init_shared_blobs() { - if (_complete || initializing_shared_blobs) return; // Done already - initializing_shared_blobs = true; - address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode); - - // Divide _shared_blobs_addr array to chunks because they could be initialized in parrallel - _shared_blobs_addr = blobs_addr; - _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max; 
- - _shared_blobs_length = 0; - _C1_blobs_length = 0; - - // clear the address table - memset(blobs_addr, 0, sizeof(address)* _blobs_max); - - // Record addresses of generated code blobs - SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub()); - SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub()); - SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack()); - SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception()); - SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution()); - SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls()); -#if INCLUDE_JVMCI - if (EnableJVMCI) { - SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap()); - SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap()); +void AOTCodeAddressTable::add_external_addresses(GrowableArray
& addresses) { + assert(initializing_extrs && !_extrs_complete, + "invalid sequence for add_external_addresses"); + for (int i = 0; i < addresses.length(); i++) { + ADD_EXTERNAL_ADDRESS(addresses.at(i)); } -#endif - - _shared_blobs_complete = true; - log_debug(aot, codecache, init)("Early shared blobs recorded"); - _complete = true; + log_debug(aot, codecache, init)("Recorded %d additional external addresses", + addresses.length()); } -void AOTCodeAddressTable::init_early_c1() { -#ifdef COMPILER1 - // Runtime1 Blobs - StubId id = StubInfo::stub_base(StubGroup::C1); - // include forward_exception in range we publish - StubId limit = StubInfo::next(StubId::c1_forward_exception_id); - for (; id != limit; id = StubInfo::next(id)) { - if (Runtime1::blob_for(id) == nullptr) { - log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id)); - continue; - } - if (Runtime1::entry_for(id) == nullptr) { - log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id)); - continue; - } - address entry = Runtime1::entry_for(id); - SET_ADDRESS(_C1_blobs, entry); - } -#endif // COMPILER1 - assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length); - _early_c1_complete = true; +void AOTCodeAddressTable::add_stub_entry(EntryId entry_id, address a) { + assert(_extrs_complete || initializing_extrs, + "recording stub entry address before external addresses complete"); + assert(!(StubInfo::is_shared(StubInfo::stub(entry_id)) && _shared_stubs_complete), "too late to add shared entry"); + assert(!(StubInfo::is_stubgen(StubInfo::stub(entry_id)) && _stubgen_stubs_complete), "too late to add stubgen entry"); + assert(!(StubInfo::is_c1(StubInfo::stub(entry_id)) && _c1_stubs_complete), "too late to add c1 entry"); + assert(!(StubInfo::is_c2(StubInfo::stub(entry_id)) && _c2_stubs_complete), "too late to add c2 entry"); + log_debug(aot, stubs)("Recording address 0x%p for %s entry %s", a, 
StubInfo::name(StubInfo::stubgroup(entry_id)), StubInfo::name(entry_id)); + int idx = static_cast(entry_id); + hash_address(a, _stubs_base + idx); + _stubs_addr[idx] = a; } -#undef SET_ADDRESS +void AOTCodeAddressTable::set_shared_stubs_complete() { + assert(!_shared_stubs_complete, "repeated close for shared stubs!"); + _shared_stubs_complete = true; + log_debug(aot, codecache, init)("Shared stubs closed"); +} -AOTCodeAddressTable::~AOTCodeAddressTable() { - if (_extrs_addr != nullptr) { - FREE_C_HEAP_ARRAY(address, _extrs_addr); - } - if (_stubs_addr != nullptr) { - FREE_C_HEAP_ARRAY(address, _stubs_addr); - } - if (_shared_blobs_addr != nullptr) { - FREE_C_HEAP_ARRAY(address, _shared_blobs_addr); - } +void AOTCodeAddressTable::set_c1_stubs_complete() { + assert(!_c1_stubs_complete, "repeated close for c1 stubs!"); + _c1_stubs_complete = true; + log_debug(aot, codecache, init)("C1 stubs closed"); +} + +void AOTCodeAddressTable::set_c2_stubs_complete() { + assert(!_c2_stubs_complete, "repeated close for c2 stubs!"); + _c2_stubs_complete = true; + log_debug(aot, codecache, init)("C2 stubs closed"); +} + +void AOTCodeAddressTable::set_stubgen_stubs_complete() { + assert(!_stubgen_stubs_complete, "repeated close for stubgen stubs!"); + _stubgen_stubs_complete = true; + log_debug(aot, codecache, init)("StubGen stubs closed"); } #ifdef PRODUCT #define MAX_STR_COUNT 200 #else -#define MAX_STR_COUNT 500 +#define MAX_STR_COUNT 2000 #endif #define _c_str_max MAX_STR_COUNT static const int _c_str_base = _all_max; @@ -1577,6 +2201,10 @@ void AOTCodeCache::load_strings() { if (strings_count == 0) { return; } + if (strings_count > MAX_STR_COUNT) { + fatal("Invalid strings_count loaded from AOT Code Cache: %d > MAX_STR_COUNT [%d]", strings_count, MAX_STR_COUNT); + return; + } uint strings_offset = _load_header->strings_offset(); uint* string_lengths = (uint*)addr(strings_offset); strings_offset += (strings_count * sizeof(uint)); @@ -1587,12 +2215,12 @@ void 
AOTCodeCache::load_strings() { char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode); memcpy(p, addr(strings_offset), strings_size); _C_strings_buf = p; - assert(strings_count <= MAX_STR_COUNT, "sanity"); for (uint i = 0; i < strings_count; i++) { _C_strings[i] = p; uint len = string_lengths[i]; _C_strings_s[i] = i; _C_strings_id[i] = i; + log_trace(aot, codecache, stringtable)("load_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(p), p); p += len; } assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size); @@ -1612,6 +2240,7 @@ int AOTCodeCache::store_strings() { } for (int i = 0; i < _C_strings_used; i++) { const char* str = _C_strings[_C_strings_s[i]]; + log_trace(aot, codecache, stringtable)("store_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(str), str); uint len = (uint)strlen(str) + 1; length += len; assert(len < 1000, "big string: %s", str); @@ -1639,7 +2268,7 @@ const char* AOTCodeCache::add_C_string(const char* str) { } const char* AOTCodeAddressTable::add_C_string(const char* str) { - if (_extrs_complete) { + if (_extrs_complete || initializing_extrs) { // Check previous strings address for (int i = 0; i < _C_strings_count; i++) { if (_C_strings_in[i] == str) { @@ -1667,7 +2296,7 @@ const char* AOTCodeAddressTable::add_C_string(const char* str) { int AOTCodeAddressTable::id_for_C_string(address str) { if (str == nullptr) { - return -1; + return BAD_ADDRESS_ID; } MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag); for (int i = 0; i < _C_strings_count; i++) { @@ -1677,6 +2306,7 @@ int AOTCodeAddressTable::id_for_C_string(address str) { assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used); return id; // Found recorded } + log_trace(aot, codecache, stringtable)("id_for_C_string: _C_strings[%d ==> %d] " INTPTR_FORMAT " '%s'", i, _C_strings_used, p2i(str), str); // Not found in recorded, add 
new id = _C_strings_used++; _C_strings_s[id] = i; @@ -1684,7 +2314,7 @@ int AOTCodeAddressTable::id_for_C_string(address str) { return id; } } - return -1; + return BAD_ADDRESS_ID; } address AOTCodeAddressTable::address_for_C_string(int idx) { @@ -1702,7 +2332,7 @@ static int search_address(address addr, address* table, uint length) { } address AOTCodeAddressTable::address_for_id(int idx) { - assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete"); + assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete"); if (idx == -1) { return (address)-1; } @@ -1719,15 +2349,9 @@ address AOTCodeAddressTable::address_for_id(int idx) { if (/* id >= _extrs_base && */ id < _extrs_length) { return _extrs_addr[id - _extrs_base]; } - if (id >= _stubs_base && id < _stubs_base + _stubs_length) { + if (id >= _stubs_base && id < _c_str_base) { return _stubs_addr[id - _stubs_base]; } - if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) { - return _shared_blobs_addr[id - _shared_blobs_base]; - } - if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) { - return _C1_blobs_addr[id - _C1_blobs_base]; - } if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) { return address_for_C_string(id - _c_str_base); } @@ -1736,7 +2360,7 @@ address AOTCodeAddressTable::address_for_id(int idx) { } int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) { - assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete"); + assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete"); int id = -1; if (addr == (address)-1) { // Static call stub has jump to itself return id; @@ -1745,16 +2369,25 @@ int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeB BarrierSet* bs = BarrierSet::barrier_set(); bool is_const_card_table_base = !UseG1GC && 
!UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet); guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity"); - + // fast path for stubs and external addresses + if (_hash_table != nullptr) { + int *result = _hash_table->get(addr); + if (result != nullptr) { + id = *result; + log_trace(aot, codecache)("Address " INTPTR_FORMAT " retrieved from AOT Code Cache address hash table with index '%d'", + p2i(addr), id); + return id; + } + } // Seach for C string id = id_for_C_string(addr); - if (id >= 0) { + if (id != BAD_ADDRESS_ID) { return id + _c_str_base; } - if (StubRoutines::contains(addr)) { - // Search in stubs - id = search_address(addr, _stubs_addr, _stubs_length); - if (id < 0) { + if (StubRoutines::contains(addr) || CodeCache::find_blob(addr) != nullptr) { + // Search for a matching stub entry + id = search_address(addr, _stubs_addr, _stubs_max); + if (id == BAD_ADDRESS_ID) { StubCodeDesc* desc = StubCodeDesc::desc_for(addr); if (desc == nullptr) { desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset); @@ -1765,51 +2398,39 @@ int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeB return id + _stubs_base; } } else { - CodeBlob* cb = CodeCache::find_blob(addr); - if (cb != nullptr) { - // Search in code blobs - int id_base = _shared_blobs_base; - id = search_address(addr, _shared_blobs_addr, _blobs_max); - if (id < 0) { - assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name()); + // Search in runtime functions + id = search_address(addr, _extrs_addr, _extrs_length); + if (id == BAD_ADDRESS_ID) { + ResourceMark rm; + const int buflen = 1024; + char* func_name = NEW_RESOURCE_ARRAY(char, buflen); + int offset = 0; + if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) { + if (offset > 0) { + // Could be address of C string + uint dist = (uint)pointer_delta(addr, (address)os::init, 1); + log_debug(aot, 
codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table", + p2i(addr), dist, (const char*)addr); + assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance"); + return dist; + } +#ifdef ASSERT + reloc.print_current_on(tty); + code_blob->print_on(tty); + code_blob->print_code_on(tty); + assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset); +#endif } else { - return id_base + id; +#ifdef ASSERT + reloc.print_current_on(tty); + code_blob->print_on(tty); + code_blob->print_code_on(tty); + os::find(addr, tty); + assert(false, "Address " INTPTR_FORMAT " for /('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr); +#endif } } else { - // Search in runtime functions - id = search_address(addr, _extrs_addr, _extrs_length); - if (id < 0) { - ResourceMark rm; - const int buflen = 1024; - char* func_name = NEW_RESOURCE_ARRAY(char, buflen); - int offset = 0; - if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) { - if (offset > 0) { - // Could be address of C string - uint dist = (uint)pointer_delta(addr, (address)os::init, 1); - log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table", - p2i(addr), dist, (const char*)addr); - assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance"); - return dist; - } -#ifdef ASSERT - reloc.print_current_on(tty); - code_blob->print_on(tty); - code_blob->print_code_on(tty); - assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset); -#endif - } else { -#ifdef ASSERT - reloc.print_current_on(tty); - code_blob->print_on(tty); - code_blob->print_code_on(tty); - os::find(addr, tty); - assert(false, "Address " INTPTR_FORMAT " for /('%s') is 
missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr); -#endif - } - } else { - return _extrs_base + id; - } + return _extrs_base + id; } } return id; @@ -1876,3 +2497,162 @@ void AOTCodeCache::print_on(outputStream* st) { } } } + +// methods for managing entries in multi-stub blobs + + +AOTStubData::AOTStubData(BlobId blob_id) : + _blob_id(blob_id), + _cached_blob(nullptr), + _stub_cnt(0), + _ranges(nullptr), + _flags(0) { + assert(StubInfo::is_stubgen(blob_id), + "AOTStubData expects a multi-stub blob not %s", + StubInfo::name(blob_id)); + + // we cannot save or restore preuniversestubs because the cache + // cannot be accessed before initialising the universe + if (blob_id == BlobId::stubgen_preuniverse_id) { + // invalidate any attempt to use this + _flags = INVALID; + return; + } + if (AOTCodeCache::is_on()) { + _flags = OPEN; + // allow update of stub entry addresses + if (AOTCodeCache::is_using_stub()) { + // allow stub loading + _flags |= USING; + } + if (AOTCodeCache::is_dumping_stub()) { + // allow stub saving + _flags |= DUMPING; + } + // we need to track all the blob's entries + _stub_cnt = StubInfo::stub_count(_blob_id); + _ranges = NEW_C_HEAP_ARRAY(StubAddrRange, _stub_cnt, mtCode); + for (int i = 0; i < _stub_cnt; i++) { + _ranges[i].default_init(); + } + } +} + +bool AOTStubData::load_code_blob() { + assert(is_using(), "should not call"); + assert(!is_invalid() && _cached_blob == nullptr, "repeated init"); + _cached_blob = AOTCodeCache::load_code_blob(AOTCodeEntry::StubGenBlob, + _blob_id, + this); + if (_cached_blob == nullptr) { + set_invalid(); + return false; + } else { + return true; + } +} + +bool AOTStubData::store_code_blob(CodeBlob& new_blob, CodeBuffer *code_buffer) { + assert(is_dumping(), "should not call"); + assert(_cached_blob == nullptr, "should not be loading and storing!"); + if (!AOTCodeCache::store_code_blob(new_blob, + AOTCodeEntry::StubGenBlob, + _blob_id, this, code_buffer)) { + set_invalid(); + return 
false; + } else { + return true; + } +} + +address AOTStubData::load_archive_data(StubId stub_id, address& end, GrowableArray
* entries, GrowableArray
* extras) { + assert(StubInfo::blob(stub_id) == _blob_id, "sanity check"); + if (is_invalid()) { + return nullptr; + } + int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id); + assert(idx >= 0 && idx < _stub_cnt, "invalid index %d for stub count %d", idx, _stub_cnt); + // ensure we have a valid associated range + StubAddrRange &range = _ranges[idx]; + int base = range.start_index(); + if (base < 0) { +#ifdef DEBUG + // reset index so we can idenitfy which ones we failed to find + range.init_entry(-2, 0); +#endif + return nullptr; + } + int count = range.count(); + assert(base >= 0, "sanity"); + assert(count >= 2, "sanity"); + // first two saved addresses are start and end + address start = _address_array.at(base); + end = _address_array.at(base + 1); + assert(start != nullptr, "failed to load start address of stub %s", StubInfo::name(stub_id)); + assert(end != nullptr, "failed to load end address of stub %s", StubInfo::name(stub_id)); + assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id)); + + int entry_count = StubInfo::entry_count(stub_id); + // the address count must at least include the stub start, end + // and secondary addresses + assert(count >= entry_count + 1, "stub %s requires %d saved addresses but only has %d", StubInfo::name(stub_id), entry_count + 1, count); + + // caller must retrieve secondary entries if and only if they exist + assert((entry_count == 1) == (entries == nullptr), "trying to retrieve wrong number of entries for stub %s", StubInfo::name(stub_id)); + int index = 2; + if (entries != nullptr) { + assert(entries->length() == 0, "non-empty array when retrieving entries for stub %s!", StubInfo::name(stub_id)); + while (index < entry_count + 1) { + address entry = _address_array.at(base + index++); + assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id)); + 
entries->append(entry); + } + } + // caller must retrieve extras if and only if they exist + assert((index < count) == (extras != nullptr), "trying to retrieve wrong number of extras for stub %s", StubInfo::name(stub_id)); + if (extras != nullptr) { + assert(extras->length() == 0, "non-empty array when retrieving extras for stub %s!", StubInfo::name(stub_id)); + while (index < count) { + address extra = _address_array.at(base + index++); + assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id)); + extras->append(extra); + } + } + + return start; +} + +void AOTStubData::store_archive_data(StubId stub_id, address start, address end, GrowableArray
* entries, GrowableArray
* extras) { + assert(StubInfo::blob(stub_id) == _blob_id, "sanity check"); + assert(start != nullptr, "start address cannot be null"); + assert(end != nullptr, "end address cannot be null"); + assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id)); + int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id); + StubAddrRange& range = _ranges[idx]; + assert(range.start_index() == -1, "sanity"); + int base = _address_array.length(); + assert(base >= 0, "sanity"); + // first two saved addresses are start and end + _address_array.append(start); + _address_array.append(end); + // caller must save secondary entries if and only if they exist + assert((StubInfo::entry_count(stub_id) == 1) == (entries == nullptr), "trying to save wrong number of entries for stub %s", StubInfo::name(stub_id)); + if (entries != nullptr) { + assert(entries->length() == StubInfo::entry_count(stub_id) - 1, "incorrect entry count %d when saving entries for stub %s!", entries->length(), StubInfo::name(stub_id)); + for (int i = 0; i < entries->length(); i++) { + address entry = entries->at(i); + assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id)); + _address_array.append(entry); + } + } + // caller may wish to save extra addresses + if (extras != nullptr) { + for (int i = 0; i < extras->length(); i++) { + address extra = extras->at(i); + // handler range end may be end -- it gets restored as nullptr + assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id)); + _address_array.append(extra); + } + } + range.init_entry(base, _address_array.length() - base); +} diff --git a/src/hotspot/share/code/aotCodeCache.hpp b/src/hotspot/share/code/aotCodeCache.hpp index 85f8b47920f..5b773a986f1 100644 --- 
a/src/hotspot/share/code/aotCodeCache.hpp +++ b/src/hotspot/share/code/aotCodeCache.hpp @@ -25,8 +25,10 @@ #ifndef SHARE_CODE_AOTCODECACHE_HPP #define SHARE_CODE_AOTCODECACHE_HPP +#include "gc/shared/collectedHeap.hpp" #include "gc/shared/gc_globals.hpp" #include "runtime/stubInfo.hpp" +#include "utilities/hashTable.hpp" /* * AOT Code Cache collects code from Code Cache and corresponding metadata @@ -38,6 +40,7 @@ class CodeBuffer; class RelocIterator; class AOTCodeCache; +class AOTCodeReader; class AdapterBlob; class ExceptionBlob; class ImmutableOopMapSet; @@ -53,6 +56,7 @@ enum CompLevel : signed char; Fn(SharedBlob) \ Fn(C1Blob) \ Fn(C2Blob) \ + Fn(StubGenBlob) \ // Descriptor of AOT Code Cache's entry class AOTCodeEntry { @@ -114,49 +118,57 @@ public: address dumptime_content_start_addr() const { return _dumptime_content_start_addr; } static bool is_valid_entry_kind(Kind kind) { return kind > None && kind < Kind_count; } - static bool is_blob(Kind kind) { return kind == SharedBlob || kind == C1Blob || kind == C2Blob; } + static bool is_blob(Kind kind) { return kind == SharedBlob || kind == C1Blob || kind == C2Blob || kind == StubGenBlob; } + static bool is_single_stub_blob(Kind kind) { return kind == SharedBlob || kind == C1Blob || kind == C2Blob; } + static bool is_multi_stub_blob(Kind kind) { return kind == StubGenBlob; } static bool is_adapter(Kind kind) { return kind == Adapter; } }; +// we use a hash table to speed up translation of external addresses +// or stub addresses to their corresponding indexes when dumping stubs +// or nmethods to the AOT code cache. +class AOTCodeAddressHashTable : public HashTable< + address, + int, + 36137, // prime number + AnyObj::C_HEAP, + mtCode> {}; + // Addresses of stubs, blobs and runtime finctions called from compiled code. 
class AOTCodeAddressTable : public CHeapObj { private: address* _extrs_addr; address* _stubs_addr; - address* _shared_blobs_addr; - address* _C1_blobs_addr; uint _extrs_length; - uint _stubs_length; - uint _shared_blobs_length; - uint _C1_blobs_length; bool _extrs_complete; - bool _early_stubs_complete; - bool _shared_blobs_complete; - bool _early_c1_complete; - bool _complete; + bool _shared_stubs_complete; + bool _c1_stubs_complete; + bool _c2_stubs_complete; + bool _stubgen_stubs_complete; + AOTCodeAddressHashTable* _hash_table; + void hash_address(address addr, int idx); public: AOTCodeAddressTable() : _extrs_addr(nullptr), _stubs_addr(nullptr), - _shared_blobs_addr(nullptr), - _C1_blobs_addr(nullptr), _extrs_length(0), - _stubs_length(0), - _shared_blobs_length(0), - _C1_blobs_length(0), _extrs_complete(false), - _early_stubs_complete(false), - _shared_blobs_complete(false), - _early_c1_complete(false), - _complete(false) + _shared_stubs_complete(false), + _c1_stubs_complete(false), + _c2_stubs_complete(false), + _stubgen_stubs_complete(false), + _hash_table(nullptr) { } - ~AOTCodeAddressTable(); void init_extrs(); - void init_early_stubs(); - void init_shared_blobs(); - void init_early_c1(); + void init_extrs2(); + void add_stub_entry(EntryId entry_id, address entry); + void add_external_addresses(GrowableArray
& addresses) NOT_CDS_RETURN; + void set_shared_stubs_complete(); + void set_c1_stubs_complete(); + void set_c2_stubs_complete(); + void set_stubgen_stubs_complete(); const char* add_C_string(const char* str); int id_for_C_string(address str); address address_for_C_string(int idx); @@ -164,30 +176,217 @@ public: address address_for_id(int id); }; +// Auxiliary class used by AOTStubData to locate addresses owned by a +// stub in the _address_array. + +class StubAddrRange { +private: + // Index of the first address owned by a stub or -1 if none present + int _start_index; + // Total number of addresses owned by a stub, including in order: + // start address for stub code and first entry, (exclusive) end + // address for stub code, all secondary entry addresses, any + // auxiliary addresses + uint _naddr; + public: + StubAddrRange() : _start_index(-1), _naddr(0) {} + int start_index() { return _start_index; } + int count() { return _naddr; } + + void default_init() { + _start_index = -1; + _naddr = 0; + } + + void init_entry(int start_index, int naddr) { + _start_index = start_index; + _naddr = naddr; + } +}; + +// class used to save and restore details of stubs embedded in a +// multi-stub (StubGen) blob + +class AOTStubData : public StackObj { + friend class AOTCodeCache; + friend class AOTCodeReader; +private: + BlobId _blob_id; // must be a stubgen blob id + // whatever buffer blob was successfully loaded from the AOT cache + // following a call to load_code_blob or nullptr + CodeBlob *_cached_blob; + // Array of addresses owned by stubs. Each stub appends addresses to + // this array as a block, whether at the end of generation or at the + // end of restoration from the cache. The first two addresses in + // each block are the "start" and "end2 address of the stub. Any + // other visible addresses located within the range [start,end) + // follow, either extra entries, data addresses or SEGV-protected + // subrange start, end and handler addresses. 
In the special case + // that the SEGV handler address is the (external) common address + // handler the array will hold value nullptr. + GrowableArray
_address_array; + // count of how many stubs exist in the current blob (not all of + // which may actually be generated) + int _stub_cnt; + // array identifying range of entries in _address_array for each stub + // indexed by offset of stub in blob + StubAddrRange* _ranges; + + // flags indicating whether the AOT code cache is open and, if so, + // whether we are loading or storing stubs or have encountered any + // invalid stubs. + enum Flags { + OPEN = 1 << 0, // cache is open + USING = 1 << 1, // open and loading stubs + DUMPING = 1 << 2, // open and storing stubs + INVALID = 1 << 3, // found invalid stub when loading + }; + + uint32_t _flags; + + void set_invalid() { _flags |= INVALID; } + + StubAddrRange& get_range(int idx) const { return _ranges[idx]; } + GrowableArray
& address_array() { return _address_array; } + // accessor for entry/auxiliary addresses defaults to start entry +public: + AOTStubData(BlobId blob_id) NOT_CDS({}); + + ~AOTStubData() CDS_ONLY({FREE_C_HEAP_ARRAY(StubAddrRange, _ranges);}) NOT_CDS({}) + + bool is_open() CDS_ONLY({ return (_flags & OPEN) != 0; }) NOT_CDS_RETURN_(false); + bool is_using() CDS_ONLY({ return (_flags & USING) != 0; }) NOT_CDS_RETURN_(false); + bool is_dumping() CDS_ONLY({ return (_flags & DUMPING) != 0; }) NOT_CDS_RETURN_(false); + bool is_invalid() CDS_ONLY({ return (_flags & INVALID) != 0; }) NOT_CDS_RETURN_(false); + + BlobId blob_id() { return _blob_id; } + bool load_code_blob() NOT_CDS_RETURN_(true); + bool store_code_blob(CodeBlob& new_blob, CodeBuffer *code_buffer) NOT_CDS_RETURN_(true); + + address load_archive_data(StubId stub_id, address &end, GrowableArray
* entries = nullptr, GrowableArray
* extras = nullptr) NOT_CDS_RETURN_(nullptr); + void store_archive_data(StubId stub_id, address start, address end, GrowableArray
* entries = nullptr, GrowableArray
* extras = nullptr) NOT_CDS_RETURN; + + const AOTStubData* as_const() { return (const AOTStubData*)this; } +}; + +#define AOTCODECACHE_CONFIGS_GENERIC_DO(do_var, do_fun) \ + do_var(int, AllocateInstancePrefetchLines) /* stubs and nmethods */ \ + do_var(int, AllocatePrefetchDistance) /* stubs and nmethods */ \ + do_var(int, AllocatePrefetchLines) /* stubs and nmethods */ \ + do_var(int, AllocatePrefetchStepSize) /* stubs and nmethods */ \ + do_var(uint, CodeEntryAlignment) /* array copy stubs and nmethods */ \ + do_var(bool, UseCompressedOops) /* stubs and nmethods */ \ + do_var(bool, EnableContended) /* nmethods */ \ + do_var(intx, OptoLoopAlignment) /* array copy stubs and nmethods */ \ + do_var(bool, RestrictContended) /* nmethods */ \ + do_var(bool, UseAESCTRIntrinsics) \ + do_var(bool, UseAESIntrinsics) \ + do_var(bool, UseBASE64Intrinsics) \ + do_var(bool, UseChaCha20Intrinsics) \ + do_var(bool, UseCRC32CIntrinsics) \ + do_var(bool, UseCRC32Intrinsics) \ + do_var(bool, UseDilithiumIntrinsics) \ + do_var(bool, UseGHASHIntrinsics) \ + do_var(bool, UseKyberIntrinsics) \ + do_var(bool, UseMD5Intrinsics) \ + do_var(bool, UsePoly1305Intrinsics) \ + do_var(bool, UseSecondarySupersTable) \ + do_var(bool, UseSHA1Intrinsics) \ + do_var(bool, UseSHA256Intrinsics) \ + do_var(bool, UseSHA3Intrinsics) \ + do_var(bool, UseSHA512Intrinsics) \ + do_var(bool, UseVectorizedMismatchIntrinsic) \ + do_fun(int, CompressedKlassPointers_shift, CompressedKlassPointers::shift()) \ + do_fun(int, CompressedOops_shift, CompressedOops::shift()) \ + do_fun(bool, JavaAssertions_systemClassDefault, JavaAssertions::systemClassDefault()) \ + do_fun(bool, JavaAssertions_userClassDefault, JavaAssertions::userClassDefault()) \ + do_fun(CollectedHeap::Name, Universe_heap_kind, Universe::heap()->kind()) \ + // END + +#ifdef COMPILER2 +#define AOTCODECACHE_CONFIGS_COMPILER2_DO(do_var, do_fun) \ + do_var(intx, ArrayOperationPartialInlineSize) /* array copy stubs and nmethods */ \ + do_var(intx, 
MaxVectorSize) /* array copy/fill stubs */ \ + do_var(bool, UseMontgomeryMultiplyIntrinsic) \ + do_var(bool, UseMontgomerySquareIntrinsic) \ + do_var(bool, UseMulAddIntrinsic) \ + do_var(bool, UseMultiplyToLenIntrinsic) \ + do_var(bool, UseSquareToLenIntrinsic) \ + // END +#else +#define AOTCODECACHE_CONFIGS_COMPILER2_DO(do_var, do_fun) +#endif + +#if INCLUDE_JVMCI +#define AOTCODECACHE_CONFIGS_JVMCI_DO(do_var, do_fun) \ + do_var(bool, EnableJVMCI) /* adapters and nmethods */ \ + // END +#else +#define AOTCODECACHE_CONFIGS_JVMCI_DO(do_var, do_fun) +#endif + +#if defined(AARCH64) && !defined(ZERO) +#define AOTCODECACHE_CONFIGS_AARCH64_DO(do_var, do_fun) \ + do_var(intx, BlockZeroingLowLimit) /* array fill stubs */ \ + do_var(intx, PrefetchCopyIntervalInBytes) /* array copy stubs */ \ + do_var(int, SoftwarePrefetchHintDistance) /* array fill stubs */ \ + do_var(bool, UseBlockZeroing) \ + do_var(bool, UseLSE) /* stubs and nmethods */ \ + do_var(uint, UseSVE) /* stubs and nmethods */ \ + do_var(bool, UseSecondarySupersCache) \ + do_var(bool, UseSIMDForArrayEquals) /* array copy stubs and nmethods */ \ + do_var(bool, UseSIMDForBigIntegerShiftIntrinsics) \ + do_var(bool, UseSIMDForMemoryOps) /* array copy stubs and nmethods */ \ + do_var(bool, UseSIMDForSHA3Intrinsic) /* SHA3 stubs */ \ + do_var(bool, UseSimpleArrayEquals) \ + // END +#else +#define AOTCODECACHE_CONFIGS_AARCH64_DO(do_var, do_fun) +#endif + +#if defined(X86) && !defined(ZERO) +#define AOTCODECACHE_CONFIGS_X86_DO(do_var, do_fun) \ + do_var(int, AVX3Threshold) /* array copy stubs and nmethods */ \ + do_var(bool, EnableX86ECoreOpts) /* nmethods */ \ + do_var(int, UseAVX) /* array copy stubs and nmethods */ \ + do_var(bool, UseAPX) /* nmethods and stubs */ \ + do_var(bool, UseLibmIntrinsic) \ + do_var(bool, UseIntPolyIntrinsics) \ + // END +#else +#define AOTCODECACHE_CONFIGS_X86_DO(do_var, do_fun) +#endif + +#define AOTCODECACHE_CONFIGS_DO(do_var, do_fun) \ + AOTCODECACHE_CONFIGS_GENERIC_DO(do_var, do_fun) \ 
+ AOTCODECACHE_CONFIGS_COMPILER2_DO(do_var, do_fun) \ + AOTCODECACHE_CONFIGS_JVMCI_DO(do_var, do_fun) \ + AOTCODECACHE_CONFIGS_AARCH64_DO(do_var, do_fun) \ + AOTCODECACHE_CONFIGS_X86_DO(do_var, do_fun) \ + // END + +#define AOTCODECACHE_DECLARE_VAR(type, name) type _saved_ ## name; +#define AOTCODECACHE_DECLARE_FUN(type, name, func) type _saved_ ## name; + class AOTCodeCache : public CHeapObj { // Classes used to describe AOT code cache. protected: class Config { - address _compressedOopBase; - uint _compressedOopShift; - uint _compressedKlassShift; - uint _contendedPaddingWidth; - uint _gc; - enum Flags { - none = 0, - debugVM = 1, - compressedOops = 2, - compressedClassPointers = 4, - useTLAB = 8, - systemClassAssertions = 16, - userClassAssertions = 32, - enableContendedPadding = 64, - restrictContendedPadding = 128 - }; - uint _flags; - uint _cpu_features_offset; // offset in the cache where cpu features are stored + AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_DECLARE_VAR, AOTCODECACHE_DECLARE_FUN) + // Special configs that cannot be checked with macros + address _compressedOopBase; + +#if defined(X86) && !defined(ZERO) + bool _useUnalignedLoadStores; +#endif + +#if defined(AARCH64) && !defined(ZERO) + bool _avoidUnalignedAccesses; +#endif + + uint _cpu_features_offset; // offset in the cache where cpu features are stored public: void record(uint cpu_features_offset); bool verify_cpu_features(AOTCodeCache* cache) const; @@ -207,17 +406,18 @@ protected: uint _entries_offset; // offset of AOTCodeEntry array describing entries uint _adapters_count; uint _shared_blobs_count; + uint _stubgen_blobs_count; uint _C1_blobs_count; uint _C2_blobs_count; Config _config; // must be the last element as there is trailing data stored immediately after Config public: void init(uint cache_size, - uint strings_count, uint strings_offset, - uint entries_count, uint entries_offset, - uint adapters_count, uint shared_blobs_count, - uint C1_blobs_count, uint C2_blobs_count, - uint 
cpu_features_offset) { + uint strings_count, uint strings_offset, + uint entries_count, uint entries_offset, + uint adapters_count, uint shared_blobs_count, + uint stubgen_blobs_count, uint C1_blobs_count, + uint C2_blobs_count, uint cpu_features_offset) { _version = AOT_CODE_VERSION; _cache_size = cache_size; _strings_count = strings_count; @@ -226,6 +426,7 @@ protected: _entries_offset = entries_offset; _adapters_count = adapters_count; _shared_blobs_count = shared_blobs_count; + _stubgen_blobs_count = stubgen_blobs_count; _C1_blobs_count = C1_blobs_count; _C2_blobs_count = C2_blobs_count; _config.record(cpu_features_offset); @@ -238,6 +439,7 @@ protected: uint entries_count() const { return _entries_count; } uint entries_offset() const { return _entries_offset; } uint adapters_count() const { return _adapters_count; } + uint stubgen_blobs_count() const { return _stubgen_blobs_count; } uint shared_blobs_count() const { return _shared_blobs_count; } uint C1_blobs_count() const { return _C1_blobs_count; } uint C2_blobs_count() const { return _C2_blobs_count; } @@ -260,7 +462,6 @@ private: uint _store_size; // Used when writing cache bool _for_use; // AOT cache is open for using AOT code bool _for_dump; // AOT cache is open for dumping AOT code - bool _closing; // Closing cache file bool _failed; // Failed read/write to/from cache (cache is broken?) 
bool _lookup_failed; // Failed to lookup for info (skip only this code load) @@ -288,9 +489,9 @@ private: void clear_lookup_failed() { _lookup_failed = false; } bool lookup_failed() const { return _lookup_failed; } + void add_stub_entry(EntryId entry_id, address entry) NOT_CDS_RETURN; public: AOTCodeCache(bool is_dumping, bool is_using); - ~AOTCodeCache(); const char* cache_buffer() const { return _load_buffer; } bool failed() const { return _failed; } @@ -304,9 +505,12 @@ public: void load_strings(); int store_strings(); - static void init_early_stubs_table() NOT_CDS_RETURN; - static void init_shared_blobs_table() NOT_CDS_RETURN; - static void init_early_c1_table() NOT_CDS_RETURN; + static void set_shared_stubs_complete() NOT_CDS_RETURN; + static void set_c1_stubs_complete() NOT_CDS_RETURN ; + static void set_c2_stubs_complete() NOT_CDS_RETURN; + static void set_stubgen_stubs_complete() NOT_CDS_RETURN; + + void add_stub_entries(StubId stub_id, address start, GrowableArray
*entries = nullptr, int offset = -1) NOT_CDS_RETURN; address address_for_C_string(int idx) const { return _table->address_for_C_string(idx); } address address_for_id(int id) const { return _table->address_for_id(id); } @@ -314,8 +518,6 @@ public: bool for_use() const { return _for_use && !_failed; } bool for_dump() const { return _for_dump && !_failed; } - bool closing() const { return _closing; } - AOTCodeEntry* add_entry() { _store_entries_cnt++; _store_entries -= 1; @@ -328,22 +530,41 @@ public: bool finish_write(); - bool write_relocations(CodeBlob& code_blob); + bool write_relocations(CodeBlob& code_blob, RelocIterator& iter); bool write_oop_map_set(CodeBlob& cb); + bool write_stub_data(CodeBlob& blob, AOTStubData *stub_data); #ifndef PRODUCT bool write_asm_remarks(CodeBlob& cb); bool write_dbg_strings(CodeBlob& cb); #endif // PRODUCT +private: + // internal private API to save and restore blobs + static bool store_code_blob(CodeBlob& blob, + AOTCodeEntry::Kind entry_kind, + uint id, + const char* name, + AOTStubData* stub_data, + CodeBuffer* code_buffer) NOT_CDS_RETURN_(false); + + static CodeBlob* load_code_blob(AOTCodeEntry::Kind kind, + uint id, + const char* name, + AOTStubData* stub_data) NOT_CDS_RETURN_(nullptr); + +public: // save and restore API for non-enumerable code blobs static bool store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, - uint id, const char* name) NOT_CDS_RETURN_(false); + uint id, + const char* name) NOT_CDS_RETURN_(false); static CodeBlob* load_code_blob(AOTCodeEntry::Kind kind, uint id, const char* name) NOT_CDS_RETURN_(nullptr); // save and restore API for enumerable code blobs + + // API for single-stub blobs static bool store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) NOT_CDS_RETURN_(false); @@ -351,6 +572,22 @@ public: static CodeBlob* load_code_blob(AOTCodeEntry::Kind kind, BlobId id) NOT_CDS_RETURN_(nullptr); + // API for multi-stub blobs -- for use by class StubGenerator. 
+ + static bool store_code_blob(CodeBlob& blob, + AOTCodeEntry::Kind kind, + BlobId id, + AOTStubData* stub_data, + CodeBuffer *code_buffer) NOT_CDS_RETURN_(false); + + static CodeBlob* load_code_blob(AOTCodeEntry::Kind kind, + BlobId id, + AOTStubData* stub_data) NOT_CDS_RETURN_(nullptr); + + static void publish_external_addresses(GrowableArray
& addresses) NOT_CDS_RETURN; + // publish all entries for a code blob in code cache address table + static void publish_stub_addresses(CodeBlob &code_blob, BlobId id, AOTStubData *stub_data) NOT_CDS_RETURN; + static uint store_entries_cnt() { if (is_on_for_dump()) { return cache()->_store_entries_cnt; @@ -372,11 +609,16 @@ private: return true; } public: + // marker used where an address offset needs to be stored for later + // retrieval and the address turns out to be null + static const uint NULL_ADDRESS_MARKER = UINT_MAX; + static AOTCodeCache* cache() { assert(_passed_init2, "Too early to ask"); return _cache; } static void initialize() NOT_CDS_RETURN; static void init2() NOT_CDS_RETURN; - static void close() NOT_CDS_RETURN; - static bool is_on() CDS_ONLY({ return cache() != nullptr && !_cache->closing(); }) NOT_CDS_RETURN_(false); + static void init3() NOT_CDS_RETURN; + static void dump() NOT_CDS_RETURN; + static bool is_on() CDS_ONLY({ return cache() != nullptr; }) NOT_CDS_RETURN_(false); static bool is_on_for_use() CDS_ONLY({ return is_on() && _cache->for_use(); }) NOT_CDS_RETURN_(false); static bool is_on_for_dump() CDS_ONLY({ return is_on() && _cache->for_dump(); }) NOT_CDS_RETURN_(false); static bool is_dumping_stub() NOT_CDS_RETURN_(false); @@ -395,7 +637,7 @@ public: // Concurent AOT code reader class AOTCodeReader { private: - const AOTCodeCache* _cache; + AOTCodeCache* _cache; const AOTCodeEntry* _entry; const char* _load_buffer; // Loaded cached code buffer uint _read_position; // Position in _load_buffer @@ -408,19 +650,33 @@ private: void clear_lookup_failed() { _lookup_failed = false; } bool lookup_failed() const { return _lookup_failed; } - AOTCodeEntry* aot_code_entry() { return (AOTCodeEntry*)_entry; } -public: - AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry); + // Values used by restore(code_blob). + // They should be set before calling it. 
+ const char* _name; + address _reloc_data; + int _reloc_count; + ImmutableOopMapSet* _oop_maps; + AOTCodeEntry::Kind _entry_kind; + int _id; + AOTStubData* _stub_data; - CodeBlob* compile_code_blob(const char* name); + AOTCodeEntry* aot_code_entry() { return (AOTCodeEntry*)_entry; } ImmutableOopMapSet* read_oop_map_set(); + void read_stub_data(CodeBlob* code_blob, AOTStubData *stub_data); - void fix_relocations(CodeBlob* code_blob); + void fix_relocations(CodeBlob* code_blob, RelocIterator& iter); #ifndef PRODUCT void read_asm_remarks(AsmRemarks& asm_remarks); void read_dbg_strings(DbgStrings& dbg_strings); #endif // PRODUCT + +public: + AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry); + + CodeBlob* compile_code_blob(const char* name, AOTCodeEntry::Kind entry_kind, int id, AOTStubData* stub_data = nullptr); + + void restore(CodeBlob* code_blob); }; // code cache internal runtime constants area used by AOT code diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index fcc0b42a461..e0c286937d0 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -22,6 +22,7 @@ * */ +#include "code/aotCodeCache.hpp" #include "code/codeBlob.hpp" #include "code/codeCache.hpp" #include "code/relocInfo.hpp" @@ -39,6 +40,7 @@ #include "prims/forte.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/icache.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaFrameAnchor.hpp" #include "runtime/jniHandles.inline.hpp" @@ -77,8 +79,10 @@ const BufferBlob::Vptr BufferBlob::_vpntr; const RuntimeStub::Vptr RuntimeStub::_vpntr; const SingletonBlob::Vptr SingletonBlob::_vpntr; const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr; +const SafepointBlob::Vptr SafepointBlob::_vpntr; #ifdef COMPILER2 const ExceptionBlob::Vptr ExceptionBlob::_vpntr; +const UncommonTrapBlob::Vptr UncommonTrapBlob::_vpntr; #endif // COMPILER2 const UpcallStub::Vptr 
UpcallStub::_vpntr; @@ -188,22 +192,6 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade assert(_mutable_data == blob_end(), "sanity"); } -void CodeBlob::restore_mutable_data(address reloc_data) { - // Relocation data is now stored as part of the mutable data area; allocate it before copy relocations - if (_mutable_data_size > 0) { - _mutable_data = (address)os::malloc(_mutable_data_size, mtCode); - if (_mutable_data == nullptr) { - vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data"); - } - } else { - _mutable_data = blob_end(); // default value - } - if (_relocation_size > 0) { - assert(_mutable_data_size > 0, "relocation is part of mutable data section"); - memcpy((address)relocation_begin(), reloc_data, relocation_size()); - } -} - void CodeBlob::purge() { assert(_mutable_data != nullptr, "should never be null"); if (_mutable_data != blob_end()) { @@ -240,6 +228,23 @@ void CodeBlob::print_code_on(outputStream* st) { Disassembler::decode(this, st); } +#if INCLUDE_CDS +void CodeBlob::restore_mutable_data(address reloc_data) { + // Relocation data is now stored as part of the mutable data area; allocate it before copy relocations + if (_mutable_data_size > 0) { + _mutable_data = (address)os::malloc(_mutable_data_size, mtCode); + if (_mutable_data == nullptr) { + vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data"); + } + } else { + _mutable_data = blob_end(); // default value + } + if (_relocation_size > 0) { + assert(_mutable_data_size > 0, "relocation is part of mutable data section"); + memcpy((address)relocation_begin(), reloc_data, relocation_size()); + } +} + void CodeBlob::prepare_for_archiving_impl() { set_name(nullptr); _oop_maps = nullptr; @@ -269,24 +274,15 @@ void CodeBlob::post_restore() { vptr(_kind)->post_restore(this); } -CodeBlob* CodeBlob::restore(address code_cache_buffer, - const char* name, - address 
archived_reloc_data, - ImmutableOopMapSet* archived_oop_maps) +CodeBlob* CodeBlob::restore(address code_cache_buffer, AOTCodeReader* reader) { copy_to(code_cache_buffer); CodeBlob* code_blob = (CodeBlob*)code_cache_buffer; - code_blob->set_name(name); - code_blob->restore_mutable_data(archived_reloc_data); - code_blob->set_oop_maps(archived_oop_maps); + reader->restore(code_blob); return code_blob; } -CodeBlob* CodeBlob::create(CodeBlob* archived_blob, - const char* name, - address archived_reloc_data, - ImmutableOopMapSet* archived_oop_maps - ) +CodeBlob* CodeBlob::create(CodeBlob* archived_blob, AOTCodeReader* reader) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock @@ -298,10 +294,7 @@ CodeBlob* CodeBlob::create(CodeBlob* archived_blob, MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod); if (code_cache_buffer != nullptr) { - blob = archived_blob->restore(code_cache_buffer, - name, - archived_reloc_data, - archived_oop_maps); + blob = archived_blob->restore(code_cache_buffer, reader); assert(blob != nullptr, "sanity check"); // Flush the code block @@ -315,6 +308,8 @@ CodeBlob* CodeBlob::create(CodeBlob* archived_blob, return blob; } +#endif // INCLUDE_CDS + //----------------------------------------------------------------------------------------- // Creates a RuntimeBlob from a CodeBuffer and copy code and relocation info. 
@@ -331,7 +326,15 @@ RuntimeBlob::RuntimeBlob( : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments, align_up(cb->total_relocation_size(), oopSize)) { + if (code_size() == 0) { + // Nothing to copy + return; + } + cb->copy_code_and_locs_to(this); + + // Flush generated code + ICache::invalidate_range(code_begin(), code_size()); } void RuntimeBlob::free(RuntimeBlob* blob) { @@ -390,7 +393,7 @@ void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const cha // Implementation of BufferBlob BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) -: RuntimeBlob(name, kind, size, header_size) + : RuntimeBlob(name, kind, size, header_size) {} BufferBlob* BufferBlob::create(const char* name, uint buffer_size) { @@ -625,8 +628,8 @@ DeoptimizationBlob::DeoptimizationBlob( int unpack_with_reexecution_offset, int frame_size ) -: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb, - size, sizeof(DeoptimizationBlob), frame_size, oop_maps) + : SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb, + size, sizeof(DeoptimizationBlob), frame_size, oop_maps) { _unpack_offset = unpack_offset; _unpack_with_exception = unpack_with_exception_offset; @@ -675,8 +678,8 @@ UncommonTrapBlob::UncommonTrapBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb, - size, sizeof(UncommonTrapBlob), frame_size, oop_maps) + : SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb, + size, sizeof(UncommonTrapBlob), frame_size, oop_maps) {} @@ -707,8 +710,8 @@ ExceptionBlob::ExceptionBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb, - size, sizeof(ExceptionBlob), frame_size, oop_maps) + : SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb, + size, sizeof(ExceptionBlob), frame_size, oop_maps) {} @@ -741,8 +744,8 @@ 
SafepointBlob::SafepointBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb, - size, sizeof(SafepointBlob), frame_size, oop_maps) + : SingletonBlob(cb->name(), CodeBlobKind::Safepoint, cb, + size, sizeof(SafepointBlob), frame_size, oop_maps) {} @@ -759,7 +762,7 @@ SafepointBlob* SafepointBlob::create( blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); } - trace_new_stub(blob, "SafepointBlob"); + trace_new_stub(blob, "SafepointBlob - ", blob->name()); return blob; } @@ -899,7 +902,7 @@ void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const } } if (is_nmethod()) { - nmethod* nm = (nmethod*)this; + nmethod* nm = as_nmethod(); ResourceMark rm; st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT, p2i(addr), (int)(addr - nm->entry_point()), p2i(nm)); @@ -935,7 +938,7 @@ void RuntimeStub::print_on_impl(outputStream* st) const { RuntimeBlob::print_on_impl(st); st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this)); st->print_cr("%s", name()); - Disassembler::decode((RuntimeBlob*)this, st); + Disassembler::decode((CodeBlob*)this, st); } void RuntimeStub::print_value_on_impl(outputStream* st) const { @@ -946,7 +949,7 @@ void SingletonBlob::print_on_impl(outputStream* st) const { ttyLocker ttyl; RuntimeBlob::print_on_impl(st); st->print_cr("%s", name()); - Disassembler::decode((RuntimeBlob*)this, st); + Disassembler::decode((CodeBlob*)this, st); } void SingletonBlob::print_value_on_impl(outputStream* st) const { @@ -964,7 +967,7 @@ void UpcallStub::print_on_impl(outputStream* st) const { oop recv = JNIHandles::resolve(_receiver); st->print("Receiver MH="); recv->print_on(st); - Disassembler::decode((RuntimeBlob*)this, st); + Disassembler::decode((CodeBlob*)this, st); } void UpcallStub::print_value_on_impl(outputStream* st) const { diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index 0469b6c71b1..709623de308 100644 
--- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -34,6 +34,7 @@ #include "utilities/align.hpp" #include "utilities/macros.hpp" +class AOTCodeReader; class ImmutableOopMap; class ImmutableOopMapSet; class JNIHandleBlock; @@ -44,9 +45,10 @@ class OopMapSet; enum class CodeBlobType { MethodNonProfiled = 0, // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods) MethodProfiled = 1, // Execution level 2 and 3 (profiled) nmethods - NonNMethod = 2, // Non-nmethods like Buffers, Adapters and Runtime Stubs - All = 3, // All types (No code cache segmentation) - NumTypes = 4 // Number of CodeBlobTypes + MethodHot = 2, // Nmethods predicted to be always hot + NonNMethod = 3, // Non-nmethods like Buffers, Adapters and Runtime Stubs + All = 4, // All types (No code cache segmentation) + NumTypes = 5 // Number of CodeBlobTypes }; // CodeBlob - superclass for all entries in the CodeCache. @@ -97,7 +99,9 @@ enum class CodeBlobKind : u1 { class UpcallStub; // for as_upcall_stub() class RuntimeStub; // for as_runtime_stub() class JavaFrameAnchor; // for UpcallStub::jfa_for_frame +class BufferBlob; class AdapterBlob; +class SingletonBlob; class ExceptionBlob; class DeoptimizationBlob; class SafepointBlob; @@ -107,9 +111,6 @@ class CodeBlob { friend class VMStructs; friend class JVMCIVMStructs; -private: - void restore_mutable_data(address reloc_data); - protected: // order fields from large to small to minimize padding between fields ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob @@ -169,8 +170,8 @@ protected: void operator delete(void* p) { } - void prepare_for_archiving_impl(); - void post_restore_impl(); + void prepare_for_archiving_impl() NOT_CDS_RETURN; + void post_restore_impl() NOT_CDS_RETURN; public: @@ -187,8 +188,19 @@ public: // Typing bool is_nmethod() const { return _kind == CodeBlobKind::Nmethod; } - bool is_buffer_blob() const { return _kind == CodeBlobKind::Buffer; } + // we may want to check for 
an actual buffer blob or subtype instance + bool is_buffer_blob(bool strict=true) const { + if (strict) { + return _kind == CodeBlobKind::Buffer; + } else { + return (_kind == CodeBlobKind::Buffer || + _kind == CodeBlobKind::Adapter || + _kind == CodeBlobKind::Vtable || + _kind == CodeBlobKind::MHAdapter); + } + } bool is_runtime_stub() const { return _kind == CodeBlobKind::RuntimeStub; } + // singleton blobs are never directly implemented bool is_deoptimization_stub() const { return _kind == CodeBlobKind::Deoptimization; } #ifdef COMPILER2 bool is_uncommon_trap_stub() const { return _kind == CodeBlobKind::UncommonTrap; } @@ -198,6 +210,12 @@ public: bool is_exception_stub() const { return false; } #endif bool is_safepoint_stub() const { return _kind == CodeBlobKind::Safepoint; } + bool is_singleton_blob() const { + return (is_deoptimization_stub() || + is_uncommon_trap_stub() || + is_exception_stub() || + is_safepoint_stub()); + } bool is_adapter_blob() const { return _kind == CodeBlobKind::Adapter; } bool is_vtable_blob() const { return _kind == CodeBlobKind::Vtable; } bool is_method_handles_adapter_blob() const { return _kind == CodeBlobKind::MHAdapter; } @@ -207,8 +225,12 @@ public: nmethod* as_nmethod_or_null() const { return is_nmethod() ? (nmethod*) this : nullptr; } nmethod* as_nmethod() const { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } CodeBlob* as_codeblob() const { return (CodeBlob*) this; } + // we may want to force an actual buffer blob or subtype instance + BufferBlob* as_buffer_blob(bool strict = true) const { assert(is_buffer_blob(strict), "must be %sbuffer blob", (strict ? 
"strict " : "")); return (BufferBlob*) this; } AdapterBlob* as_adapter_blob() const { assert(is_adapter_blob(), "must be adapter blob"); return (AdapterBlob*) this; } ExceptionBlob* as_exception_blob() const { assert(is_exception_stub(), "must be exception stub"); return (ExceptionBlob*) this; } + // this will always return a subtype instance + SingletonBlob* as_singleton_blob() const { assert(is_singleton_blob(), "must be singleton blob"); return (SingletonBlob*) this; } DeoptimizationBlob* as_deoptimization_blob() const { assert(is_deoptimization_stub(), "must be deopt stub"); return (DeoptimizationBlob*) this; } SafepointBlob* as_safepoint_blob() const { assert(is_safepoint_stub(), "must be safepoint stub"); return (SafepointBlob*) this; } UpcallStub* as_upcall_stub() const { assert(is_upcall_stub(), "must be upcall stub"); return (UpcallStub*) this; } @@ -304,6 +326,9 @@ public: void use_strings(DbgStrings &strings) { _dbg_strings.share(strings); } #endif +#if INCLUDE_CDS + void restore_mutable_data(address reloc_data); + void copy_to(address buffer) { memcpy(buffer, this, this->size()); } @@ -314,11 +339,9 @@ public: // methods to restore a blob from AOT code cache into the CodeCache void post_restore(); - CodeBlob* restore(address code_cache_buffer, const char* name, address archived_reloc_data, ImmutableOopMapSet* archived_oop_maps); - static CodeBlob* create(CodeBlob* archived_blob, - const char* name, - address archived_reloc_data, - ImmutableOopMapSet* archived_oop_maps); + CodeBlob* restore(address code_cache_buffer, AOTCodeReader* reader); + static CodeBlob* create(CodeBlob* archived_blob, AOTCodeReader* reader); +#endif }; //---------------------------------------------------------------------------------------------------- @@ -388,10 +411,10 @@ class BufferBlob: public RuntimeBlob { class Vptr : public RuntimeBlob::Vptr { void print_on(const CodeBlob* instance, outputStream* st) const override { - ((const BufferBlob*)instance)->print_on_impl(st); + 
instance->as_buffer_blob(false)->print_on_impl(st); } void print_value_on(const CodeBlob* instance, outputStream* st) const override { - ((const BufferBlob*)instance)->print_value_on_impl(st); + instance->as_buffer_blob(false)->print_value_on_impl(st); } }; @@ -487,10 +510,17 @@ class RuntimeStub: public RuntimeBlob { address entry_point() const { return code_begin(); } + void post_restore_impl() { + trace_new_stub(this, "RuntimeStub - ", name()); + } + void print_on_impl(outputStream* st) const; void print_value_on_impl(outputStream* st) const; class Vptr : public RuntimeBlob::Vptr { + void post_restore(CodeBlob* instance) const override { + instance->as_runtime_stub()->post_restore_impl(); + } void print_on(const CodeBlob* instance, outputStream* st) const override { instance->as_runtime_stub()->print_on_impl(st); } @@ -532,10 +562,10 @@ class SingletonBlob: public RuntimeBlob { class Vptr : public RuntimeBlob::Vptr { void print_on(const CodeBlob* instance, outputStream* st) const override { - ((const SingletonBlob*)instance)->print_on_impl(st); + instance->as_singleton_blob()->print_on_impl(st); } void print_value_on(const CodeBlob* instance, outputStream* st) const override { - ((const SingletonBlob*)instance)->print_value_on_impl(st); + instance->as_singleton_blob()->print_value_on_impl(st); } }; @@ -574,7 +604,7 @@ class DeoptimizationBlob: public SingletonBlob { ); public: - static const int ENTRY_COUNT = 4 JVMTI_ONLY(+ 2); + static const int ENTRY_COUNT = 4 JVMCI_ONLY(+ 2); // Creation static DeoptimizationBlob* create( CodeBuffer* cb, @@ -606,20 +636,28 @@ class DeoptimizationBlob: public SingletonBlob { _uncommon_trap_offset = offset; assert(contains(code_begin() + _uncommon_trap_offset), "must be PC inside codeblob"); } - address uncommon_trap() const { return code_begin() + _uncommon_trap_offset; } + address uncommon_trap() const { return (EnableJVMCI ? 
code_begin() + _uncommon_trap_offset : nullptr); } void set_implicit_exception_uncommon_trap_offset(int offset) { _implicit_exception_uncommon_trap_offset = offset; assert(contains(code_begin() + _implicit_exception_uncommon_trap_offset), "must be PC inside codeblob"); } - address implicit_exception_uncommon_trap() const { return code_begin() + _implicit_exception_uncommon_trap_offset; } + address implicit_exception_uncommon_trap() const { return (EnableJVMCI ? code_begin() + _implicit_exception_uncommon_trap_offset : nullptr); } #endif // INCLUDE_JVMCI + void post_restore_impl() { + trace_new_stub(this, "DeoptimizationBlob"); + } + void print_value_on_impl(outputStream* st) const; class Vptr : public SingletonBlob::Vptr { + void post_restore(CodeBlob* instance) const override { + instance->as_deoptimization_blob()->post_restore_impl(); + } + void print_value_on(const CodeBlob* instance, outputStream* st) const override { - ((const DeoptimizationBlob*)instance)->print_value_on_impl(st); + instance->as_deoptimization_blob()->print_value_on_impl(st); } }; @@ -649,6 +687,16 @@ class UncommonTrapBlob: public SingletonBlob { OopMapSet* oop_maps, int frame_size ); + void post_restore_impl() { + trace_new_stub(this, "UncommonTrapBlob"); + } + class Vptr : public SingletonBlob::Vptr { + void post_restore(CodeBlob* instance) const override { + instance->as_uncommon_trap_blob()->post_restore_impl(); + } + }; + + static const Vptr _vpntr; }; @@ -679,7 +727,7 @@ class ExceptionBlob: public SingletonBlob { class Vptr : public SingletonBlob::Vptr { void post_restore(CodeBlob* instance) const override { - ((ExceptionBlob*)instance)->post_restore_impl(); + instance->as_exception_blob()->post_restore_impl(); } }; @@ -709,6 +757,17 @@ class SafepointBlob: public SingletonBlob { OopMapSet* oop_maps, int frame_size ); + + void post_restore_impl() { + trace_new_stub(this, "SafepointBlob - ", name()); + } + class Vptr : public SingletonBlob::Vptr { + void post_restore(CodeBlob* 
instance) const override { + instance->as_safepoint_blob()->post_restore_impl(); + } + }; + + static const Vptr _vpntr; }; //---------------------------------------------------------------------------------------------------- diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index 2a0256cc316..c0b4918102e 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -201,6 +201,7 @@ void CodeCache::initialize_heaps() { CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true}; CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true}; CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true}; + CodeHeapInfo hot = {HotCodeHeapSize, FLAG_IS_CMDLINE(HotCodeHeapSize), true}; const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize); const size_t ps = page_size(false, 8); @@ -219,6 +220,12 @@ void CodeCache::initialize_heaps() { profiled.enabled = false; } + if (!heap_available(CodeBlobType::MethodHot)) { + hot.size = 0; + hot.set = true; + hot.enabled = false; + } + assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap"); size_t compiler_buffer_size = 0; @@ -238,14 +245,36 @@ void CodeCache::initialize_heaps() { set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size); } - if (!profiled.set && non_profiled.set) { - set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size); + if (!profiled.set && non_profiled.set && hot.set) { + set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size + hot.size, min_size); + } + + if (hot.enabled) { + if (!hot.set) { + assert(hot.size == 0, "must be calculated during heaps initialization"); + // An application usually has ~20% hot code which is mostly 
non-profiled code. + // We set the hot code heap size to 20% of the non-profiled code heap. + hot.size = MAX2(non_profiled.size / 5, min_size); + + if (non_profiled.set) { + err_msg msg("Must manually set HotCodeHeapSize when NonProfiledCodeHeapSize is set"); + vm_exit_during_initialization("Invalid code heap sizes", msg); + } + + non_profiled.size -= hot.size; + } + + if (hot.size > non_profiled.size) { + err_msg msg("Hot (%zuK) exceeds NonProfiled (%zuK).", + hot.size / K, non_profiled.size / K); + vm_exit_during_initialization("Invalid code heap sizes", msg); + } } // Compatibility. size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size; - if (!non_nmethod.set && profiled.set && non_profiled.set) { - set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size); + if (!non_nmethod.set && profiled.set && non_profiled.set && hot.set) { + set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size + hot.size, non_nmethod_min_size); } // Note: if large page support is enabled, min_size is at least the large @@ -253,8 +282,9 @@ void CodeCache::initialize_heaps() { non_nmethod.size = align_up(non_nmethod.size, min_size); profiled.size = align_up(profiled.size, min_size); non_profiled.size = align_up(non_profiled.size, min_size); + hot.size = align_up(hot.size, min_size); - size_t aligned_total = non_nmethod.size + profiled.size + non_profiled.size; + size_t aligned_total = non_nmethod.size + profiled.size + non_profiled.size + hot.size; if (!cache_size_set) { // If ReservedCodeCacheSize is explicitly set and exceeds CODE_CACHE_SIZE_LIMIT, // it is rejected by flag validation elsewhere. Here we only handle the case @@ -262,15 +292,15 @@ void CodeCache::initialize_heaps() { // sizes (after alignment) exceed the platform limit. if (aligned_total > CODE_CACHE_SIZE_LIMIT) { err_msg message("ReservedCodeCacheSize (%zuK), Max (%zuK)." 
- "Segments: NonNMethod (%zuK), NonProfiled (%zuK), Profiled (%zuK).", + "Segments: NonNMethod (%zuK), NonProfiled (%zuK), Profiled (%zuK), Hot (%zuK).", aligned_total/K, CODE_CACHE_SIZE_LIMIT/K, - non_nmethod.size/K, non_profiled.size/K, profiled.size/K); + non_nmethod.size/K, non_profiled.size/K, profiled.size/K, hot.size/K); vm_exit_during_initialization("Code cache size exceeds platform limit", message); } if (aligned_total != cache_size) { log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod " - "%zuK NonProfiled %zuK Profiled %zuK = %zuK", - cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, aligned_total/K); + "%zuK NonProfiled %zuK Profiled %zuK Hot %zuK = %zuK", + cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, hot.size/K, aligned_total/K); // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly cache_size = aligned_total; } @@ -295,19 +325,23 @@ void CodeCache::initialize_heaps() { } if (profiled.enabled && !profiled.set && profiled.size > min_size) { profiled.size -= min_size; + if (--delta == 0) break; + } + if (hot.enabled && !hot.set && hot.size > min_size) { + hot.size -= min_size; delta--; } if (delta == start_delta) { break; } } - aligned_total = non_nmethod.size + profiled.size + non_profiled.size; + aligned_total = non_nmethod.size + profiled.size + non_profiled.size + hot.size; } } log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK" - " NonProfiled %zuK Profiled %zuK", - cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K); + " NonProfiled %zuK Profiled %zuK Hot %zuK", + cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, hot.size/K); // Validation // Check minimal required sizes @@ -318,6 +352,9 @@ void CodeCache::initialize_heaps() { if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity 
check_min_size("non-profiled code heap", non_profiled.size, min_size); } + if (hot.enabled) { + check_min_size("hot code heap", hot.size, min_size); + } // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes if (aligned_total != cache_size && cache_size_set) { @@ -328,6 +365,9 @@ void CodeCache::initialize_heaps() { if (non_profiled.enabled) { message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K); } + if (hot.enabled) { + message.append(" + HotCodeHeapSize (%zuK)", hot.size/K); + } message.append(" = %zuK", aligned_total/K); message.append((aligned_total > cache_size) ? " is greater than " : " is less than "); message.append("ReservedCodeCacheSize (%zuK).", cache_size/K); @@ -348,6 +388,7 @@ void CodeCache::initialize_heaps() { FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size); FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size); FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size); + FLAG_SET_ERGO(HotCodeHeapSize, hot.size); FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size); ReservedSpace rs = reserve_heap_memory(cache_size, ps); @@ -368,6 +409,13 @@ void CodeCache::initialize_heaps() { // Non-nmethods (stubs, adapters, ...) add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod); + if (hot.enabled) { + ReservedSpace hot_space = rs.partition(offset, hot.size); + offset += hot.size; + // Nmethods known to be always hot. 
+ add_heap(hot_space, "CodeHeap 'hot nmethods'", CodeBlobType::MethodHot); + } + if (non_profiled.enabled) { ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size); // Tier 1 and tier 4 (non-profiled) methods and native methods @@ -406,16 +454,25 @@ bool CodeCache::heap_available(CodeBlobType code_blob_type) { // Interpreter only: we don't need any method code heaps return (code_blob_type == CodeBlobType::NonNMethod); } else if (CompilerConfig::is_c1_profiling()) { - // Tiered compilation: use all code heaps + // Tiered compilation: use all code heaps including + // the hot code heap when it is present. + + if (COMPILER2_PRESENT(!HotCodeHeap &&) (code_blob_type == CodeBlobType::MethodHot)) { + return false; + } + return (code_blob_type < CodeBlobType::All); } else { // No TieredCompilation: we only need the non-nmethod and non-profiled code heap + // and the hot code heap if it is requested. return (code_blob_type == CodeBlobType::NonNMethod) || - (code_blob_type == CodeBlobType::MethodNonProfiled); + (code_blob_type == CodeBlobType::MethodNonProfiled) + COMPILER2_PRESENT(|| ((code_blob_type == CodeBlobType::MethodHot) && HotCodeHeap)); } } -const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) { +// Returns the name of the VM option to set the size of the corresponding CodeHeap +static const char* get_code_heap_flag_name(CodeBlobType code_blob_type) { switch(code_blob_type) { case CodeBlobType::NonNMethod: return "NonNMethodCodeHeapSize"; @@ -426,6 +483,9 @@ const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) { case CodeBlobType::MethodProfiled: return "ProfiledCodeHeapSize"; break; + case CodeBlobType::MethodHot: + return "HotCodeHeapSize"; + break; default: ShouldNotReachHere(); return nullptr; @@ -542,7 +602,7 @@ CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handl // Get CodeHeap for the given CodeBlobType CodeHeap* heap = get_code_heap(code_blob_type); - assert(heap 
!= nullptr, "heap is null"); + assert(heap != nullptr, "No heap for given code_blob_type (%d), heap is null", (int)code_blob_type); while (true) { cb = (CodeBlob*)heap->allocate(size); @@ -570,6 +630,9 @@ CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handl type = CodeBlobType::MethodNonProfiled; } break; + case CodeBlobType::MethodHot: + type = CodeBlobType::MethodNonProfiled; + break; default: break; } diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index 349cc652bf4..6384cb397b8 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -118,10 +118,6 @@ class CodeCache : AllStatic { // Creates a new heap with the given name and size, containing CodeBlobs of the given type static void add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type); static CodeHeap* get_code_heap_containing(void* p); // Returns the CodeHeap containing the given pointer, or nullptr - static CodeHeap* get_code_heap(const void* cb); // Returns the CodeHeap for the given CodeBlob - static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType - // Returns the name of the VM option to set the size of the corresponding CodeHeap - static const char* get_code_heap_flag_name(CodeBlobType code_blob_type); static ReservedSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one continuous chunk of memory for the CodeHeaps // Iteration @@ -145,6 +141,8 @@ class CodeCache : AllStatic { static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs); static void add_heap(CodeHeap* heap); + static CodeHeap* get_code_heap(const void* cb); // Returns the CodeHeap for the given CodeBlob + static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType static const GrowableArray* heaps() { return _heaps; } static const GrowableArray* nmethod_heaps() { return 
_nmethod_heaps; } @@ -264,7 +262,7 @@ class CodeCache : AllStatic { } static bool code_blob_type_accepts_nmethod(CodeBlobType type) { - return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled; + return type == CodeBlobType::All || type <= CodeBlobType::MethodHot; } static bool code_blob_type_accepts_allocable(CodeBlobType type) { diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp index 5f5c9711441..07d96d6cd44 100644 --- a/src/hotspot/share/code/compiledIC.cpp +++ b/src/hotspot/share/code/compiledIC.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -76,11 +76,7 @@ CompiledICData::CompiledICData() // Inline cache callsite info is initialized once the first time it is resolved void CompiledICData::initialize(CallInfo* call_info, Klass* receiver_klass) { _speculated_method = call_info->selected_method(); - if (UseCompressedClassPointers) { - _speculated_klass = (uintptr_t)CompressedKlassPointers::encode_not_null(receiver_klass); - } else { - _speculated_klass = (uintptr_t)receiver_klass; - } + _speculated_klass = (uintptr_t)CompressedKlassPointers::encode_not_null(receiver_klass); if (call_info->call_kind() == CallInfo::itable_call) { assert(call_info->resolved_method() != nullptr, "virtual or interface method must be found"); _itable_defc_klass = call_info->resolved_method()->method_holder(); @@ -133,12 +129,7 @@ Klass* CompiledICData::speculated_klass() const { if (is_speculated_klass_unloaded()) { return nullptr; } - - if (UseCompressedClassPointers) { - return CompressedKlassPointers::decode_not_null((narrowKlass)_speculated_klass); - } else { - return (Klass*)_speculated_klass; - } + return 
CompressedKlassPointers::decode_not_null((narrowKlass)_speculated_klass); } //----------------------------------------------------------------------------- diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 4c2f9157b99..815c0c7b4b0 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -66,6 +66,10 @@ #include "runtime/flags/flagSetting.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" +#ifdef COMPILER2 +#include "runtime/hotCodeCollector.hpp" +#endif // COMPILER2 +#include "runtime/icache.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" @@ -1253,7 +1257,15 @@ void nmethod::post_init() { finalize_relocations(); + // Flush generated code + ICache::invalidate_range(code_begin(), code_size()); + Universe::heap()->register_nmethod(this); + +#ifdef COMPILER2 + HotCodeCollector::register_nmethod(this); +#endif // COMPILER2 + DEBUG_ONLY(Universe::heap()->verify_nmethod(this)); CodeCache::commit(this); @@ -1306,9 +1318,7 @@ nmethod::nmethod( _deopt_handler_entry_offset = 0; _unwind_handler_offset = 0; - CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize)); - uint16_t metadata_size; - CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize)); + int metadata_size = align_up(code_buffer->total_metadata_size(), wordSize); JVMCI_ONLY( _metadata_size = metadata_size; ) assert(_mutable_data_size == _relocation_size + metadata_size, "wrong mutable data size: %d != %d + %d", @@ -1446,7 +1456,6 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm. 
_deopt_handler_entry_offset = nm._deopt_handler_entry_offset; _unwind_handler_offset = nm._unwind_handler_offset; _num_stack_arg_slots = nm._num_stack_arg_slots; - _oops_size = nm._oops_size; #if INCLUDE_JVMCI _metadata_size = nm._metadata_size; #endif @@ -1587,8 +1596,6 @@ nmethod* nmethod::relocate(CodeBlobType code_blob_type) { // Attempt to start using the copy if (nm_copy->make_in_use()) { - ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size()); - methodHandle mh(Thread::current(), nm_copy->method()); nm_copy->method()->set_code(mh, nm_copy); @@ -1749,9 +1756,7 @@ nmethod::nmethod( _unwind_handler_offset = -1; } - CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize)); - uint16_t metadata_size; - CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize)); + int metadata_size = align_up(code_buffer->total_metadata_size(), wordSize); JVMCI_ONLY( _metadata_size = metadata_size; ) int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize)); assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size, @@ -2041,7 +2046,7 @@ void nmethod::copy_values(GrowableArray* array) { // The code and relocations have already been initialized by the // CodeBlob constructor, so it is valid even at this early point to // iterate over relocations and patch the code. 
- fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true); + fix_oop_relocations(/*initialize_immediates=*/ true); } void nmethod::copy_values(GrowableArray* array) { @@ -2053,24 +2058,42 @@ void nmethod::copy_values(GrowableArray* array) { } } -void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) { +bool nmethod::fix_oop_relocations(bool initialize_immediates) { // re-patch all oop-bearing instructions, just in case some oops moved - RelocIterator iter(this, begin, end); + RelocIterator iter(this); + bool modified_code = false; while (iter.next()) { if (iter.type() == relocInfo::oop_type) { oop_Relocation* reloc = iter.oop_reloc(); - if (initialize_immediates && reloc->oop_is_immediate()) { + if (!reloc->oop_is_immediate()) { + // Refresh the oop-related bits of this instruction. + reloc->set_value(reloc->value()); + modified_code = true; + } else if (initialize_immediates) { oop* dest = reloc->oop_addr(); jobject obj = *reinterpret_cast(dest); initialize_immediate_oop(dest, obj); } - // Refresh the oop-related bits of this instruction. 
- reloc->fix_oop_relocation(); } else if (iter.type() == relocInfo::metadata_type) { metadata_Relocation* reloc = iter.metadata_reloc(); reloc->fix_metadata_relocation(); + modified_code |= !reloc->metadata_is_immediate(); } } + return modified_code; +} + +void nmethod::fix_oop_relocations() { + ICacheInvalidationContext icic; + fix_oop_relocations(&icic); +} + +void nmethod::fix_oop_relocations(ICacheInvalidationContext* icic) { + assert(icic != nullptr, "must provide context to track if code was modified"); + bool modified_code = fix_oop_relocations(/*initialize_immediates=*/ false); + if (modified_code) { + icic->set_has_modified_code(); + } } static void install_post_call_nop_displacement(nmethod* nm, address pc) { @@ -2461,6 +2484,11 @@ void nmethod::purge(bool unregister_nmethod) { if (unregister_nmethod) { Universe::heap()->unregister_nmethod(this); } + +#ifdef COMPILER2 + HotCodeCollector::unregister_nmethod(this); +#endif // COMPILER2 + CodeCache::unregister_old_nmethod(this); JVMCI_ONLY( _metadata_size = 0; ) diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 2391bc6d830..ea8c0e2ad5d 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -41,6 +41,7 @@ class Dependencies; class DirectiveSet; class DebugInformationRecorder; class ExceptionHandlerTable; +class ICacheInvalidationContext; class ImplicitExceptionTable; class JvmtiThreadState; class MetadataClosure; @@ -235,11 +236,10 @@ class nmethod : public CodeBlob { // Number of arguments passed on the stack uint16_t _num_stack_arg_slots; - uint16_t _oops_size; #if INCLUDE_JVMCI // _metadata_size is not specific to JVMCI. 
In the non-JVMCI case, it can be derived as: // _metadata_size = mutable_data_size - relocation_size - uint16_t _metadata_size; + int _metadata_size; #endif // Offset in immutable data section @@ -802,15 +802,15 @@ public: // Relocation support private: - void fix_oop_relocations(address begin, address end, bool initialize_immediates); + bool fix_oop_relocations(bool initialize_immediates); inline void initialize_immediate_oop(oop* dest, jobject handle); protected: address oops_reloc_begin() const; public: - void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } - void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); } + void fix_oop_relocations(ICacheInvalidationContext* icic); + void fix_oop_relocations(); bool is_at_poll_return(address pc); bool is_at_poll_or_poll_return(address pc); diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index 2a6335e2118..73e4b6de7b4 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -590,15 +590,15 @@ oop oop_Relocation::oop_value() { return *oop_addr(); } - void oop_Relocation::fix_oop_relocation() { + // TODO: we need to add some assert here that ICache::invalidate_range is called in the code + // which uses this function. 
if (!oop_is_immediate()) { // get the oop from the pool, and re-insert it into the instruction: set_value(value()); } } - void oop_Relocation::verify_oop_relocation() { if (!oop_is_immediate()) { // get the oop from the pool, and re-insert it into the instruction: diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp index 9bd6e893bcd..cf7744cfe03 100644 --- a/src/hotspot/share/compiler/compilerDefinitions.cpp +++ b/src/hotspot/share/compiler/compilerDefinitions.cpp @@ -185,49 +185,6 @@ intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) { } } -void CompilerConfig::set_client_emulation_mode_flags() { - assert(has_c1(), "Must have C1 compiler present"); - CompilationModeFlag::set_quick_only(); - - FLAG_SET_ERGO(ProfileInterpreter, false); -#if INCLUDE_JVMCI - FLAG_SET_ERGO(EnableJVMCI, false); - FLAG_SET_ERGO(UseJVMCICompiler, false); -#endif - if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) { - FLAG_SET_ERGO(NeverActAsServerClassMachine, true); - } - if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) { - FLAG_SET_ERGO(InitialCodeCacheSize, 160*K); - } - if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) { - FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M); - } - if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) { - FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M); - } - if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) { - FLAG_SET_ERGO(ProfiledCodeHeapSize, 0); - } - if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) { - FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M); - } - if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) { - FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K); - } - if (FLAG_IS_DEFAULT(CICompilerCount)) { - FLAG_SET_ERGO(CICompilerCount, 1); - } -} - -bool CompilerConfig::is_compilation_mode_selected() { - return !FLAG_IS_DEFAULT(TieredCompilation) || - !FLAG_IS_DEFAULT(TieredStopAtLevel) || - !FLAG_IS_DEFAULT(CompilationMode) - JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI) - || !FLAG_IS_DEFAULT(UseJVMCICompiler)); -} - static 
bool check_legacy_flags() { JVMFlag* compile_threshold_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(CompileThreshold)); if (JVMFlagAccess::check_constraint(compile_threshold_flag, JVMFlagLimit::get_constraint(compile_threshold_flag)->constraint_func(), false) != JVMFlag::SUCCESS) { @@ -329,8 +286,38 @@ void CompilerConfig::set_compilation_policy_flags() { } } +#ifdef COMPILER2 + if (HotCodeHeap) { + if (FLAG_IS_DEFAULT(SegmentedCodeCache)) { + FLAG_SET_ERGO(SegmentedCodeCache, true); + } else if (!SegmentedCodeCache) { + vm_exit_during_initialization("HotCodeHeap requires SegmentedCodeCache enabled"); + } + + if (FLAG_IS_DEFAULT(NMethodRelocation)) { + FLAG_SET_ERGO(NMethodRelocation, true); + } else if (!NMethodRelocation) { + vm_exit_during_initialization("HotCodeHeap requires NMethodRelocation enabled"); + } + + if (!is_c2_enabled()) { + vm_exit_during_initialization("HotCodeHeap requires C2 enabled"); + } + + if (HotCodeMinSamplingMs > HotCodeMaxSamplingMs) { + vm_exit_during_initialization("HotCodeMinSamplingMs cannot be larger than HotCodeMaxSamplingMs"); + } + } else if (HotCodeHeapSize > 0) { + vm_exit_during_initialization("HotCodeHeapSize requires HotCodeHeap enabled"); + } +#else + if (HotCodeHeapSize > 0) { + vm_exit_during_initialization("HotCodeHeapSize requires C2 present"); + } +#endif // COMPILER2 + if (CompileThresholdScaling < 0) { - vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", nullptr); + vm_exit_during_initialization("Negative value specified for CompileThresholdScaling"); } if (CompilationModeFlag::disable_intermediate()) { @@ -447,9 +434,6 @@ void CompilerConfig::set_jvmci_specific_flags() { if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) { FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize)); } - if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) { - FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease)); - } if (FLAG_IS_DEFAULT(Tier3DelayOn)) { // This effectively 
prevents the compile broker scheduling tier 2 // (i.e., limited C1 profiling) compilations instead of tier 3 @@ -546,36 +530,11 @@ bool CompilerConfig::check_args_consistency(bool status) { return status; } -bool CompilerConfig::should_set_client_emulation_mode_flags() { -#if !COMPILER1_OR_COMPILER2 - return false; -#endif - - if (has_c1()) { - if (!is_compilation_mode_selected()) { - if (NeverActAsServerClassMachine) { - return true; - } - } else if (!has_c2() && !is_jvmci_compiler()) { - return true; - } - } - - return false; -} - void CompilerConfig::ergo_initialize() { #if !COMPILER1_OR_COMPILER2 return; #endif - // This property is also checked when selecting the heap size. Since client - // emulation mode influences Java heap memory usage, part of the logic must - // occur before choosing the heap size. - if (should_set_client_emulation_mode_flags()) { - set_client_emulation_mode_flags(); - } - set_legacy_emulation_flags(); set_compilation_policy_flags(); @@ -594,9 +553,6 @@ void CompilerConfig::ergo_initialize() { } if (ProfileInterpreter && CompilerConfig::is_c1_simple_only()) { - if (!FLAG_IS_DEFAULT(ProfileInterpreter)) { - warning("ProfileInterpreter disabled due to client emulation mode"); - } FLAG_SET_CMDLINE(ProfileInterpreter, false); } diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp index a9b052ff782..e8ba977f705 100644 --- a/src/hotspot/share/compiler/compilerDefinitions.hpp +++ b/src/hotspot/share/compiler/compilerDefinitions.hpp @@ -151,14 +151,10 @@ public: inline static CompilerType compiler_type(); - static bool should_set_client_emulation_mode_flags(); - private: - static bool is_compilation_mode_selected(); static void set_compilation_policy_flags(); static void set_jvmci_specific_flags(); static void set_legacy_emulation_flags(); - static void set_client_emulation_mode_flags(); }; #endif // SHARE_COMPILER_COMPILERDEFINITIONS_HPP diff --git 
a/src/hotspot/share/compiler/compiler_globals_pd.hpp b/src/hotspot/share/compiler/compiler_globals_pd.hpp index 6a87fdaaaf1..8ac4b53d6cd 100644 --- a/src/hotspot/share/compiler/compiler_globals_pd.hpp +++ b/src/hotspot/share/compiler/compiler_globals_pd.hpp @@ -58,7 +58,6 @@ define_pd_global(bool, TieredCompilation, false); define_pd_global(intx, CompileThreshold, 0); define_pd_global(intx, OnStackReplacePercentage, 0); -define_pd_global(size_t, NewSizeThreadIncrease, 4*K); define_pd_global(bool, InlineClassNatives, true); define_pd_global(bool, InlineUnsafeOps, true); define_pd_global(size_t, InitialCodeCacheSize, 160*K); @@ -70,13 +69,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 32*M); define_pd_global(size_t, CodeCacheExpansionSize, 32*K); define_pd_global(size_t, CodeCacheMinBlockLength, 1); define_pd_global(size_t, CodeCacheMinimumUseSpace, 200*K); -#ifndef ZERO -define_pd_global(bool, NeverActAsServerClassMachine, true); -#else -// Zero runs without compilers. Do not let this code to force -// the GC mode and default heap settings. -define_pd_global(bool, NeverActAsServerClassMachine, false); -#endif #define CI_COMPILER_COUNT 0 #else diff --git a/src/hotspot/share/compiler/disassembler.hpp b/src/hotspot/share/compiler/disassembler.hpp index db7066c9023..b9de9c3d27d 100644 --- a/src/hotspot/share/compiler/disassembler.hpp +++ b/src/hotspot/share/compiler/disassembler.hpp @@ -112,7 +112,7 @@ class Disassembler : public AbstractDisassembler { // interpreter code, by riding on the customary __ macro in the interpreter generator. // See templateTable_x86.cpp for an example. 
template inline static T* hook(const char* file, int line, T* masm) { - if (PrintInterpreter) { + if (PrintInterpreter NOT_PRODUCT(|| true)) { _hook(file, line, masm); } return masm; diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp index 8bec6e7e86f..c3bbd5a3b52 100644 --- a/src/hotspot/share/gc/g1/g1Arguments.cpp +++ b/src/hotspot/share/gc/g1/g1Arguments.cpp @@ -148,8 +148,9 @@ void G1Arguments::initialize_card_set_configuration() { if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) { uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion); + const JVMTypedFlagLimit* limit = JVMFlagLimit::get_range_at(FLAG_MEMBER_ENUM(G1RemSetArrayOfCardsEntries))->cast(); FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2, - G1RemSetArrayOfCardsEntriesBase << region_size_log_mb)); + MIN2(G1RemSetArrayOfCardsEntriesBase << region_size_log_mb, limit->max()))); } // Howl card set container globals. 
diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp index fd70796251d..7f0e5e86cd9 100644 --- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp +++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp @@ -73,8 +73,8 @@ void G1BlockOffsetTable::set_offset_array(Atomic* left, Atomic #ifdef ASSERT void G1BlockOffsetTable::check_address(Atomic* addr, const char* msg) const { - Atomic* start_addr = const_cast*>(_offset_base + (uintptr_t(_reserved.start()) >> CardTable::card_shift())); - Atomic* end_addr = const_cast*>(_offset_base + (uintptr_t(_reserved.end()) >> CardTable::card_shift())); + Atomic* start_addr = _offset_base + (uintptr_t(_reserved.start()) >> CardTable::card_shift()); + Atomic* end_addr = _offset_base + (uintptr_t(_reserved.end()) >> CardTable::card_shift()); assert(addr >= start_addr && addr <= end_addr, "%s - offset address: " PTR_FORMAT ", start address: " PTR_FORMAT ", end address: " PTR_FORMAT, msg, (p2i(addr)), (p2i(start_addr)), (p2i(end_addr))); diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp index b707e310781..1236d24bb03 100644 --- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp +++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp @@ -54,7 +54,7 @@ uint8_t G1BlockOffsetTable::offset_array(Atomic* addr) const { inline Atomic* G1BlockOffsetTable::entry_for_addr(const void* const p) const { assert(_reserved.contains(p), "out of bounds access to block offset table"); - Atomic* result = const_cast*>(&_offset_base[uintptr_t(p) >> CardTable::card_shift()]); + Atomic* result = &_offset_base[uintptr_t(p) >> CardTable::card_shift()]; return result; } diff --git a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp index 60602ef942b..0da2f90da3f 100644 --- a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp +++ b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp @@ -90,7 +90,7 @@ 
G1CardSetMemoryManager::~G1CardSetMemoryManager() { for (uint i = 0; i < num_mem_object_types(); i++) { _allocators[i].~G1CardSetAllocator(); } - FREE_C_HEAP_ARRAY(G1CardSetAllocator, _allocators); + FREE_C_HEAP_ARRAY(G1CardSetAllocator, _allocators); } void G1CardSetMemoryManager::free(uint type, void* value) { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 3f5d674c443..2709e6b3008 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -34,7 +34,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectionSetCandidates.hpp" -#include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefineThread.hpp" @@ -42,7 +42,6 @@ #include "gc/g1/g1FullCollector.hpp" #include "gc/g1/g1GCCounters.hpp" #include "gc/g1/g1GCParPhaseTimesTracker.hpp" -#include "gc/g1/g1GCPauseType.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegionPrinter.hpp" @@ -1652,21 +1651,14 @@ jint G1CollectedHeap::initialize() { return JNI_OK; } -bool G1CollectedHeap::concurrent_mark_is_terminating() const { - assert(_cm != nullptr, "_cm must have been created"); - assert(_cm->is_fully_initialized(), "thread must exist in order to check if mark is terminating"); - return _cm->cm_thread()->should_terminate(); -} - void G1CollectedHeap::stop() { + assert_not_at_safepoint(); // Stop all concurrent threads. We do this to make sure these threads // do not continue to execute and access resources (e.g. logging) // that are destroyed during shutdown. 
_cr->stop(); _service_thread->stop(); - if (_cm->is_fully_initialized()) { - _cm->cm_thread()->stop(); - } + _cm->stop(); } void G1CollectedHeap::safepoint_synchronize_begin() { @@ -1857,12 +1849,12 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent, record_whole_heap_examined_timestamp(); } - // We need to clear the "in_progress" flag in the CM thread before + // We need to tell G1ConcurrentMark to update the state before // we wake up any waiters (especially when ExplicitInvokesConcurrent // is set) so that if a waiter requests another System.gc() it doesn't // incorrectly see that a marking cycle is still in progress. if (concurrent) { - _cm->cm_thread()->set_idle(); + _cm->notify_concurrent_cycle_completed(); } // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent) @@ -2489,7 +2481,7 @@ void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) { void G1CollectedHeap::gc_prologue(bool full) { // Update common counters. 
increment_total_collections(full /* full gc */); - if (full || collector_state()->in_concurrent_start_gc()) { + if (full || collector_state()->is_in_concurrent_start_gc()) { increment_old_marking_cycles_started(); } } @@ -2562,14 +2554,12 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_mark) { assert(_cm->is_fully_initialized(), "sanity"); - assert(!_cm->in_progress(), "Can not start concurrent operation while in progress"); + assert(!collector_state()->is_in_concurrent_cycle(), "Can not start concurrent cycle when already running"); MutexLocker x(G1CGC_lock, Mutex::_no_safepoint_check_flag); if (concurrent_operation_is_full_mark) { - _cm->post_concurrent_mark_start(); - _cm->cm_thread()->start_full_mark(); + _cm->start_full_concurrent_cycle(); } else { - _cm->post_concurrent_undo_start(); - _cm->cm_thread()->start_undo_mark(); + _cm->start_undo_concurrent_cycle(); } G1CGC_lock->notify(); } @@ -2661,7 +2651,7 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType verify_numa_regions("GC End"); _verifier->verify_region_sets_optional(); - if (collector_state()->in_concurrent_start_gc()) { + if (collector_state()->is_in_concurrent_start_gc()) { log_debug(gc, verify)("Marking state"); _verifier->verify_marking_state(); } @@ -2742,7 +2732,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_si // Record whether this pause may need to trigger a concurrent operation. Later, // when we signal the G1ConcurrentMarkThread, the collector state has already // been reset for the next pause. - bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc(); + bool should_start_concurrent_mark_operation = collector_state()->is_in_concurrent_start_gc(); // Perform the collection. 
G1YoungCollector collector(gc_cause(), allocation_word_size); @@ -2837,7 +2827,7 @@ bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) { } void G1CollectedHeap::make_pending_list_reachable() { - if (collector_state()->in_concurrent_start_gc()) { + if (collector_state()->is_in_concurrent_start_gc()) { oop pll_head = Universe::reference_pending_list(); if (pll_head != nullptr) { // Any valid worker id is fine here as we are in the VM thread and single-threaded. @@ -3222,14 +3212,14 @@ void G1CollectedHeap::retire_gc_alloc_region(G1HeapRegion* alloc_region, _survivor.add_used_bytes(allocated_bytes); } - bool const during_im = collector_state()->in_concurrent_start_gc(); + bool const during_im = collector_state()->is_in_concurrent_start_gc(); if (during_im && allocated_bytes > 0) { _cm->add_root_region(alloc_region); } G1HeapRegionPrinter::retire(alloc_region); } -void G1CollectedHeap::mark_evac_failure_object(uint worker_id, const oop obj, size_t obj_size) const { +void G1CollectedHeap::mark_evac_failure_object(const oop obj) const { assert(!_cm->is_marked_in_bitmap(obj), "must be"); _cm->raw_mark_in_bitmap(obj); diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index 8ff9d481000..3a47453819e 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -34,7 +34,6 @@ #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1EdenRegions.hpp" #include "gc/g1/g1EvacStats.hpp" -#include "gc/g1/g1GCPauseType.hpp" #include "gc/g1/g1HeapRegionAttr.hpp" #include "gc/g1/g1HeapRegionManager.hpp" #include "gc/g1/g1HeapRegionSet.hpp" @@ -915,9 +914,6 @@ public: // specified by the policy object. jint initialize() override; - // Returns whether concurrent mark threads (and the VM) are about to terminate. 
- bool concurrent_mark_is_terminating() const; - void safepoint_synchronize_begin() override; void safepoint_synchronize_end() override; @@ -1279,7 +1275,7 @@ public: inline bool is_obj_dead_full(const oop obj) const; // Mark the live object that failed evacuation in the bitmap. - void mark_evac_failure_object(uint worker_id, oop obj, size_t obj_size) const; + void mark_evac_failure_object(oop obj) const; G1ConcurrentMark* concurrent_mark() const { return _cm; } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp index 8782b65b6f9..bad9ac18eec 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -28,7 +28,6 @@ #include "gc/g1/g1CollectedHeap.hpp" #include "gc/g1/g1BarrierSet.hpp" -#include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1ConcurrentMark.inline.hpp" #include "gc/g1/g1EvacFailureRegions.hpp" #include "gc/g1/g1EvacStats.inline.hpp" @@ -47,9 +46,9 @@ #include "utilities/bitMap.inline.hpp" inline bool G1STWIsAliveClosure::do_object_b(oop p) { - // An object is reachable if it is outside the collection set, - // or is inside and copied. - return !_g1h->is_in_cset(p) || p->is_forwarded(); + // An object is reachable if it is outside the collection set and not a + // humongous candidate, or is inside and copied. + return !_g1h->is_in_cset_or_humongous_candidate(p) || p->is_forwarded(); } inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) { diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp index abfddf860e6..b3bcf6094ab 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.inline.hpp" #include "gc/g1/g1CollectionSetCandidates.inline.hpp" -#include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegionRemSet.inline.hpp" #include "gc/g1/g1HeapRegionSet.hpp" @@ -174,7 +174,6 @@ void G1CollectionSet::iterate(G1HeapRegionClosure* cl) const { G1HeapRegion* r = _g1h->region_at(_regions[i]); bool result = cl->do_heap_region(r); if (result) { - cl->set_incomplete(); return; } } @@ -326,7 +325,7 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi guarantee(target_pause_time_ms > 0.0, "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms); - bool in_young_only_phase = _policy->collector_state()->in_young_only_phase(); + bool in_young_only_phase = _policy->collector_state()->is_in_young_only_phase(); size_t pending_cards = _policy->analytics()->predict_pending_cards(in_young_only_phase); log_trace(gc, ergo, cset)("Start choosing CSet. 
Pending cards: %zu target pause time: %1.2fms", @@ -379,7 +378,7 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) { if (!candidates()->is_empty()) { candidates()->verify(); - if (collector_state()->in_mixed_phase()) { + if (collector_state()->is_in_mixed_phase()) { time_remaining_ms = select_candidates_from_marking(time_remaining_ms); } else { log_debug(gc, ergo, cset)("Do not add marking candidates to collection set due to pause type."); diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp index d71108d4d0e..2113db1163b 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,6 @@ */ #include "gc/g1/g1CollectionSetCandidates.inline.hpp" -#include "gc/g1/g1CollectionSetChooser.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "utilities/growableArray.hpp" @@ -250,8 +249,9 @@ void G1CollectionSetCandidates::sort_marking_by_efficiency() { _from_marking_groups.verify(); } -void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candidates, - uint num_candidates) { +void G1CollectionSetCandidates::set_candidates_from_marking(GrowableArrayCHeap* candidates) { + uint num_candidates = candidates->length(); + if (num_candidates == 0) { log_debug(gc, ergo, cset) ("No regions selected from marking."); return; @@ -264,7 +264,7 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candi // During each Mixed GC, we must collect at least G1Policy::calc_min_old_cset_length regions to meet // the G1MixedGCCountTarget. 
For the first collection in a Mixed GC cycle, we can add all regions // required to meet this threshold to the same remset group. We are certain these will be collected in - // the same MixedGC. + // the same Mixed GC. uint group_limit = p->calc_min_old_cset_length(num_candidates); G1CSetCandidateGroup::reset_next_group_id(); @@ -273,7 +273,7 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candi current = new G1CSetCandidateGroup(); for (uint i = 0; i < num_candidates; i++) { - G1HeapRegion* r = candidates[i]; + G1HeapRegion* r = candidates->at(i); assert(!contains(r), "must not contain region %u", r->hrm_index()); _contains_map[r->hrm_index()] = CandidateOrigin::Marking; diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp index 7e882de7e5a..8a2235cf89c 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -245,8 +245,7 @@ public: // Merge collection set candidates from marking into the current marking candidates // (which needs to be empty). - void set_candidates_from_marking(G1HeapRegion** candidates, - uint num_candidates); + void set_candidates_from_marking(GrowableArrayCHeap* selected); // The most recent length of the list that had been merged last via // set_candidates_from_marking(). Used for calculating minimum collection set // regions. 
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp deleted file mode 100644 index e7bab32129e..00000000000 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1CollectionSetCandidates.hpp" -#include "gc/g1/g1CollectionSetChooser.hpp" -#include "gc/g1/g1HeapRegionRemSet.inline.hpp" -#include "gc/shared/space.hpp" -#include "runtime/atomic.hpp" -#include "utilities/quickSort.hpp" - -// Determine collection set candidates (from marking): For all regions determine -// whether they should be a collection set candidate. Calculate their efficiency, -// sort, and put them into the collection set candidates. -// -// Threads calculate the GC efficiency of the regions they get to process, and -// put them into some work area without sorting. 
At the end that array is sorted and -// moved to the destination. -class G1BuildCandidateRegionsTask : public WorkerTask { - // Work area for building the set of collection set candidates. Contains references - // to heap regions with their GC efficiencies calculated. To reduce contention - // on claiming array elements, worker threads claim parts of this array in chunks; - // Array elements may be null as threads might not get enough regions to fill - // up their chunks completely. - // Final sorting will remove them. - class G1BuildCandidateArray : public StackObj { - uint const _max_size; - uint const _chunk_size; - - G1HeapRegion** _data; - - Atomic _cur_claim_idx; - - static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) { - G1HeapRegion* r1 = *rr1; - G1HeapRegion* r2 = *rr2; - // Make sure that null entries are moved to the end. - if (r1 == nullptr) { - if (r2 == nullptr) { - return 0; - } else { - return 1; - } - } else if (r2 == nullptr) { - return -1; - } - - G1Policy* p = G1CollectedHeap::heap()->policy(); - double gc_efficiency1 = p->predict_gc_efficiency(r1); - double gc_efficiency2 = p->predict_gc_efficiency(r2); - - if (gc_efficiency1 > gc_efficiency2) { - return -1; - } else if (gc_efficiency1 < gc_efficiency2) { - return 1; - } else { - return 0; - } - } - - // Calculates the maximum array size that will be used. - static uint required_array_size(uint num_regions, uint chunk_size, uint num_workers) { - uint const max_waste = num_workers * chunk_size; - // The array should be aligned with respect to chunk_size. 
- uint const aligned_num_regions = ((num_regions + chunk_size - 1) / chunk_size) * chunk_size; - - return aligned_num_regions + max_waste; - } - - public: - G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) : - _max_size(required_array_size(max_num_regions, chunk_size, num_workers)), - _chunk_size(chunk_size), - _data(NEW_C_HEAP_ARRAY(G1HeapRegion*, _max_size, mtGC)), - _cur_claim_idx(0) { - for (uint i = 0; i < _max_size; i++) { - _data[i] = nullptr; - } - } - - ~G1BuildCandidateArray() { - FREE_C_HEAP_ARRAY(G1HeapRegion*, _data); - } - - // Claim a new chunk, returning its bounds [from, to[. - void claim_chunk(uint& from, uint& to) { - uint result = _cur_claim_idx.add_then_fetch(_chunk_size); - assert(_max_size > result - 1, - "Array too small, is %u should be %u with chunk size %u.", - _max_size, result, _chunk_size); - from = result - _chunk_size; - to = result; - } - - // Set element in array. - void set(uint idx, G1HeapRegion* hr) { - assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size); - assert(_data[idx] == nullptr, "Value must not have been set."); - _data[idx] = hr; - } - - void sort_by_gc_efficiency() { - uint length = _cur_claim_idx.load_relaxed(); - if (length == 0) { - return; - } - for (uint i = length; i < _max_size; i++) { - assert(_data[i] == nullptr, "must be"); - } - qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency); - for (uint i = length; i < _max_size; i++) { - assert(_data[i] == nullptr, "must be"); - } - } - - G1HeapRegion** array() const { return _data; } - }; - - // Per-region closure. In addition to determining whether a region should be - // added to the candidates, and calculating those regions' gc efficiencies, also - // gather additional statistics. 
- class G1BuildCandidateRegionsClosure : public G1HeapRegionClosure { - G1BuildCandidateArray* _array; - - uint _cur_chunk_idx; - uint _cur_chunk_end; - - uint _regions_added; - - void add_region(G1HeapRegion* hr) { - if (_cur_chunk_idx == _cur_chunk_end) { - _array->claim_chunk(_cur_chunk_idx, _cur_chunk_end); - } - assert(_cur_chunk_idx < _cur_chunk_end, "Must be"); - - _array->set(_cur_chunk_idx, hr); - _cur_chunk_idx++; - - _regions_added++; - } - - public: - G1BuildCandidateRegionsClosure(G1BuildCandidateArray* array) : - _array(array), - _cur_chunk_idx(0), - _cur_chunk_end(0), - _regions_added(0) { } - - bool do_heap_region(G1HeapRegion* r) { - // Candidates from marking are always old; also keep regions that are already - // collection set candidates (some retained regions) in that list. - if (!r->is_old() || r->is_collection_set_candidate()) { - // Keep remembered sets and everything for these regions. - return false; - } - - // Can not add a region without a remembered set to the candidates. - if (!r->rem_set()->is_tracked()) { - return false; - } - - // Skip any region that is currently used as an old GC alloc region. We should - // not consider those for collection before we fill them up as the effective - // gain from them is small. I.e. we only actually reclaim from the filled part, - // as the remainder is still eligible for allocation. These objects are also - // likely to have already survived a few collections, so they might be longer - // lived anyway. - // Otherwise the Old region must satisfy the liveness condition. 
- bool should_add = !G1CollectedHeap::heap()->is_old_gc_alloc_region(r) && - G1CollectionSetChooser::region_occupancy_low_enough_for_evac(r->live_bytes()); - if (should_add) { - add_region(r); - } else { - r->rem_set()->clear(true /* only_cardset */); - } - return false; - } - - uint regions_added() const { return _regions_added; } - }; - - G1CollectedHeap* _g1h; - G1HeapRegionClaimer _hrclaimer; - - Atomic _num_regions_added; - - G1BuildCandidateArray _result; - - void update_totals(uint num_regions) { - if (num_regions > 0) { - _num_regions_added.add_then_fetch(num_regions); - } - } - - // Early prune (remove) regions meeting the G1HeapWastePercent criteria. That - // is, either until only the minimum amount of old collection set regions are - // available (for forward progress in evacuation) or the waste accumulated by the - // removed regions is above the maximum allowed waste. - // Updates number of candidates and reclaimable bytes given. - void prune(G1HeapRegion** data) { - G1Policy* p = G1CollectedHeap::heap()->policy(); - - uint num_candidates = _num_regions_added.load_relaxed(); - - uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates); - uint num_pruned = 0; - size_t wasted_bytes = 0; - - if (min_old_cset_length >= num_candidates) { - // We take all of the candidate regions to provide some forward progress. 
- return; - } - - size_t allowed_waste = p->allowed_waste_in_collection_set(); - uint max_to_prune = num_candidates - min_old_cset_length; - - while (true) { - G1HeapRegion* r = data[num_candidates - num_pruned - 1]; - size_t const reclaimable = r->reclaimable_bytes(); - if (num_pruned >= max_to_prune || - wasted_bytes + reclaimable > allowed_waste) { - break; - } - r->rem_set()->clear(true /* cardset_only */); - - wasted_bytes += reclaimable; - num_pruned++; - } - - log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving %zu bytes waste (allowed %zu)", - num_pruned, - num_candidates, - wasted_bytes, - allowed_waste); - - _num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed); - } - -public: - G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) : - WorkerTask("G1 Build Candidate Regions"), - _g1h(G1CollectedHeap::heap()), - _hrclaimer(num_workers), - _num_regions_added(0), - _result(max_num_regions, chunk_size, num_workers) { } - - void work(uint worker_id) { - G1BuildCandidateRegionsClosure cl(&_result); - _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id); - update_totals(cl.regions_added()); - } - - void sort_and_prune_into(G1CollectionSetCandidates* candidates) { - _result.sort_by_gc_efficiency(); - prune(_result.array()); - candidates->set_candidates_from_marking(_result.array(), - _num_regions_added.load_relaxed()); - } -}; - -uint G1CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint num_regions) { - assert(num_workers > 0, "Active gc workers should be greater than 0"); - return MAX2(num_regions / num_workers, 1U); -} - -void G1CollectionSetChooser::build(WorkerThreads* workers, uint max_num_regions, G1CollectionSetCandidates* candidates) { - uint num_workers = workers->active_workers(); - uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions); - - G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers); - 
workers->run_task(&cl, num_workers); - - cl.sort_and_prune_into(candidates); - candidates->verify(); -} diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp deleted file mode 100644 index 4db8ed23c49..00000000000 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_GC_G1_G1COLLECTIONSETCHOOSER_HPP -#define SHARE_GC_G1_G1COLLECTIONSETCHOOSER_HPP - -#include "gc/g1/g1HeapRegion.hpp" -#include "gc/shared/gc_globals.hpp" -#include "memory/allStatic.hpp" -#include "runtime/globals.hpp" - -class G1CollectionSetCandidates; -class WorkerThreads; - -// Helper class to calculate collection set candidates, and containing some related -// methods. 
-class G1CollectionSetChooser : public AllStatic { - static uint calculate_work_chunk_size(uint num_workers, uint num_regions); - - static size_t mixed_gc_live_threshold_bytes() { - return G1HeapRegion::GrainBytes * (size_t)G1MixedGCLiveThresholdPercent / 100; - } - -public: - static bool region_occupancy_low_enough_for_evac(size_t live_bytes) { - return live_bytes < mixed_gc_live_threshold_bytes(); - } - - // Build and return set of collection set candidates sorted by decreasing gc - // efficiency. - static void build(WorkerThreads* workers, uint max_num_regions, G1CollectionSetCandidates* candidates); -}; - -#endif // SHARE_GC_G1_G1COLLECTIONSETCHOOSER_HPP diff --git a/src/hotspot/share/gc/g1/g1CollectorState.cpp b/src/hotspot/share/gc/g1/g1CollectorState.cpp index d41ee22fdce..76de9c65cc8 100644 --- a/src/hotspot/share/gc/g1/g1CollectorState.cpp +++ b/src/hotspot/share/gc/g1/g1CollectorState.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,25 +22,32 @@ * */ -#include "gc/g1/g1CollectorState.hpp" -#include "gc/g1/g1GCPauseType.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/debug.hpp" -G1GCPauseType G1CollectorState::young_gc_pause_type(bool concurrent_operation_is_full_mark) const { - assert(!in_full_gc(), "must be"); - if (in_concurrent_start_gc()) { - assert(!in_young_gc_before_mixed(), "must be"); - return concurrent_operation_is_full_mark ? 
G1GCPauseType::ConcurrentStartMarkGC : - G1GCPauseType::ConcurrentStartUndoGC; - } else if (in_young_gc_before_mixed()) { - assert(!in_concurrent_start_gc(), "must be"); - return G1GCPauseType::LastYoungGC; - } else if (in_mixed_phase()) { - assert(!in_concurrent_start_gc(), "must be"); - assert(!in_young_gc_before_mixed(), "must be"); - return G1GCPauseType::MixedGC; - } else { - assert(!in_concurrent_start_gc(), "must be"); - assert(!in_young_gc_before_mixed(), "must be"); - return G1GCPauseType::YoungGC; +G1CollectorState::Pause G1CollectorState::gc_pause_type(bool concurrent_operation_is_full_mark) const { + assert(SafepointSynchronize::is_at_safepoint(), "must be"); + switch (_phase) { + case Phase::YoungNormal: return Pause::Normal; + case Phase::YoungConcurrentStart: + return concurrent_operation_is_full_mark ? Pause::ConcurrentStartFull : + Pause::ConcurrentStartUndo; + case Phase::YoungPrepareMixed: return Pause::PrepareMixed; + case Phase::Mixed: return Pause::Mixed; + case Phase::FullGC: return Pause::Full; + default: ShouldNotReachHere(); } } + +const char* G1CollectorState::to_string(Pause type) { + static const char* pause_strings[] = { "Normal", + "Concurrent Start", // Do not distinguish between the different + "Concurrent Start", // Concurrent Start pauses. + "Prepare Mixed", + "Cleanup", + "Remark", + "Mixed", + "Full" }; + return pause_strings[static_cast(type)]; +} diff --git a/src/hotspot/share/gc/g1/g1CollectorState.hpp b/src/hotspot/share/gc/g1/g1CollectorState.hpp index fca30792344..42aaeab03b2 100644 --- a/src/hotspot/share/gc/g1/g1CollectorState.hpp +++ b/src/hotspot/share/gc/g1/g1CollectorState.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,98 +25,100 @@ #ifndef SHARE_GC_G1_G1COLLECTORSTATE_HPP #define SHARE_GC_G1_G1COLLECTORSTATE_HPP -#include "gc/g1/g1GCPauseType.hpp" +#include "utilities/debug.hpp" +#include "utilities/enumIterator.hpp" #include "utilities/globalDefinitions.hpp" // State of the G1 collection. +// +// The rough phasing is Young-Only, Mixed / Space Reclamation and +// Full GC "phase". +// +// We split the Young-only phase into three parts to cover interesting +// sub-phases and avoid separate tracking. class G1CollectorState { - // Indicates whether we are in the phase where we do partial gcs that only contain - // the young generation. Not set while _in_full_gc is set. - bool _in_young_only_phase; + enum class Phase { + // Indicates that the next GC in the Young-Only phase will (likely) be a "Normal" + // young GC. + YoungNormal, + // We are in a concurrent start GC during the Young-Only phase. This is only set + // during that GC because we only decide whether we do this type of GC at the start + // of the pause. + YoungConcurrentStart, + // Indicates that we are about to start or in the prepare mixed gc in the Young-Only + // phase before the Mixed phase. This GC is required to keep pause time requirements. + YoungPrepareMixed, + // Doing extra old generation evacuation. + Mixed, + // The Full GC phase (that coincides with the Full GC pause). + FullGC + } _phase; - // Indicates whether we are in the last young gc before the mixed gc phase. This GC - // is required to keep pause time requirements. - bool _in_young_gc_before_mixed; - - // If _initiate_conc_mark_if_possible is set at the beginning of a - // pause, it is a suggestion that the pause should start a marking - // cycle by doing the concurrent start work. However, it is possible - // that the concurrent marking thread is still finishing up the - // previous marking cycle (e.g., clearing the marking bitmap). 
- // If that is the case we cannot start a new cycle and - // we'll have to wait for the concurrent marking thread to finish - // what it is doing. In this case we will postpone the marking cycle - // initiation decision for the next pause. When we eventually decide - // to start a cycle, we will set _in_concurrent_start_gc which - // will stay true until the end of the concurrent start pause doing the - // concurrent start work. - volatile bool _in_concurrent_start_gc; - - // At the end of a pause we check the heap occupancy and we decide - // whether we will start a marking cycle during the next pause. If - // we decide that we want to do that, set this parameter. This parameter will - // stay set until the beginning of a subsequent pause (not necessarily - // the next one) when we decide that we will indeed start a marking cycle and - // do the concurrent start phase work. + // _initiate_conc_mark_if_possible indicates that there has been a request to start + // a concurrent cycle but we have not been able to fulfill it because another one + // has been in progress when the request came in. + // + // This flag remembers that there is an unfullfilled request. volatile bool _initiate_conc_mark_if_possible; - // Marking is in progress. Set from start of the concurrent start pause to the - // end of the Remark pause. - bool _mark_in_progress; - // Marking or rebuilding remembered set work is in progress. Set from the end - // of the concurrent start pause to the end of the Cleanup pause. - bool _mark_or_rebuild_in_progress; - - // The marking bitmap is currently being cleared or about to be cleared. - bool _clear_bitmap_in_progress; - - // Set during a full gc pause. 
- bool _in_full_gc; - public: G1CollectorState() : - _in_young_only_phase(true), - _in_young_gc_before_mixed(false), - - _in_concurrent_start_gc(false), - _initiate_conc_mark_if_possible(false), - - _mark_in_progress(false), - _mark_or_rebuild_in_progress(false), - _clear_bitmap_in_progress(false), - _in_full_gc(false) { } + _phase(Phase::YoungNormal), + _initiate_conc_mark_if_possible(false) { } // Phase setters - void set_in_young_only_phase(bool v) { _in_young_only_phase = v; } + inline void set_in_normal_young_gc(); + inline void set_in_space_reclamation_phase(); + inline void set_in_full_gc(); - // Pause setters - void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; } - void set_in_concurrent_start_gc(bool v) { _in_concurrent_start_gc = v; } - void set_in_full_gc(bool v) { _in_full_gc = v; } + inline void set_in_concurrent_start_gc(); + inline void set_in_prepare_mixed_gc(); - void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; } - - void set_mark_in_progress(bool v) { _mark_in_progress = v; } - void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; } - void set_clear_bitmap_in_progress(bool v) { _clear_bitmap_in_progress = v; } + inline void set_initiate_conc_mark_if_possible(bool v); // Phase getters - bool in_young_only_phase() const { return _in_young_only_phase && !_in_full_gc; } - bool in_mixed_phase() const { return !_in_young_only_phase && !_in_full_gc; } + inline bool is_in_young_only_phase() const; + inline bool is_in_mixed_phase() const; // Specific pauses - bool in_young_gc_before_mixed() const { return _in_young_gc_before_mixed; } - bool in_full_gc() const { return _in_full_gc; } - bool in_concurrent_start_gc() const { return _in_concurrent_start_gc; } + inline bool is_in_concurrent_start_gc() const; + inline bool is_in_prepare_mixed_gc() const; + inline bool is_in_full_gc() const; - bool initiate_conc_mark_if_possible() const { return 
_initiate_conc_mark_if_possible; } + inline bool initiate_conc_mark_if_possible() const; - bool mark_in_progress() const { return _mark_in_progress; } - bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; } - bool clear_bitmap_in_progress() const { return _clear_bitmap_in_progress; } + bool is_in_concurrent_cycle() const; + bool is_in_marking() const; + bool is_in_mark_or_rebuild() const; + bool is_in_reset_for_next_cycle() const; + + enum class Pause : uint { + Normal, + ConcurrentStartFull, + ConcurrentStartUndo, + PrepareMixed, + Cleanup, + Remark, + Mixed, + Full + }; // Calculate GC Pause Type from internal state. - G1GCPauseType young_gc_pause_type(bool concurrent_operation_is_full_mark) const; + Pause gc_pause_type(bool concurrent_operation_is_full_mark) const; + + static const char* to_string(Pause type); + + // Pause kind queries + inline static void assert_is_young_pause(Pause type); + + inline static bool is_young_only_pause(Pause type); + inline static bool is_concurrent_start_pause(Pause type); + inline static bool is_prepare_mixed_pause(Pause type); + inline static bool is_mixed_pause(Pause type); + + inline static bool is_concurrent_cycle_pause(Pause type); }; +ENUMERATOR_RANGE(G1CollectorState::Pause, G1CollectorState::Pause::Normal, G1CollectorState::Pause::Full) + #endif // SHARE_GC_G1_G1COLLECTORSTATE_HPP diff --git a/src/hotspot/share/gc/g1/g1CollectorState.inline.hpp b/src/hotspot/share/gc/g1/g1CollectorState.inline.hpp new file mode 100644 index 00000000000..0c6c9c879c3 --- /dev/null +++ b/src/hotspot/share/gc/g1/g1CollectorState.inline.hpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_G1_G1COLLECTORSTATE_INLINE_HPP +#define SHARE_GC_G1_G1COLLECTORSTATE_INLINE_HPP + +#include "gc/g1/g1CollectorState.hpp" + +#include "gc/g1/g1CollectedHeap.hpp" +#include "gc/g1/g1ConcurrentMark.inline.hpp" + +inline void G1CollectorState::set_in_normal_young_gc() { + _phase = Phase::YoungNormal; +} +inline void G1CollectorState::set_in_space_reclamation_phase() { + _phase = Phase::Mixed; +} +inline void G1CollectorState::set_in_full_gc() { + _phase = Phase::FullGC; +} + +inline void G1CollectorState::set_in_concurrent_start_gc() { + _phase = Phase::YoungConcurrentStart; + _initiate_conc_mark_if_possible = false; +} +inline void G1CollectorState::set_in_prepare_mixed_gc() { + _phase = Phase::YoungPrepareMixed; +} + +inline void G1CollectorState::set_initiate_conc_mark_if_possible(bool v) { + _initiate_conc_mark_if_possible = v; +} + +inline bool G1CollectorState::is_in_young_only_phase() const { + return _phase == Phase::YoungNormal || + _phase == Phase::YoungConcurrentStart || + _phase == Phase::YoungPrepareMixed; +} +inline bool G1CollectorState::is_in_mixed_phase() const { + return _phase == Phase::Mixed; +} + +inline bool G1CollectorState::is_in_prepare_mixed_gc() const { + return _phase == Phase::YoungPrepareMixed; +} 
+inline bool G1CollectorState::is_in_full_gc() const { + return _phase == Phase::FullGC; +} +inline bool G1CollectorState::is_in_concurrent_start_gc() const { + return _phase == Phase::YoungConcurrentStart; +} + +inline bool G1CollectorState::initiate_conc_mark_if_possible() const { + return _initiate_conc_mark_if_possible; +} + +inline bool G1CollectorState::is_in_concurrent_cycle() const { + G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark(); + return cm->is_in_concurrent_cycle(); +} +inline bool G1CollectorState::is_in_marking() const { + G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark(); + return cm->is_in_marking(); +} +inline bool G1CollectorState::is_in_mark_or_rebuild() const { + G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark(); + return is_in_marking() || cm->is_in_rebuild_or_scrub(); +} +inline bool G1CollectorState::is_in_reset_for_next_cycle() const { + G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark(); + return cm->is_in_reset_for_next_cycle(); +} + +inline void G1CollectorState::assert_is_young_pause(Pause type) { + assert(type != Pause::Full, "must be"); + assert(type != Pause::Remark, "must be"); + assert(type != Pause::Cleanup, "must be"); +} + +inline bool G1CollectorState::is_young_only_pause(Pause type) { + assert_is_young_pause(type); + return type == Pause::ConcurrentStartUndo || + type == Pause::ConcurrentStartFull || + type == Pause::PrepareMixed || + type == Pause::Normal; +} + +inline bool G1CollectorState::is_mixed_pause(Pause type) { + assert_is_young_pause(type); + return type == Pause::Mixed; +} + +inline bool G1CollectorState::is_prepare_mixed_pause(Pause type) { + assert_is_young_pause(type); + return type == Pause::PrepareMixed; +} + +inline bool G1CollectorState::is_concurrent_start_pause(Pause type) { + assert_is_young_pause(type); + return type == Pause::ConcurrentStartFull || type == Pause::ConcurrentStartUndo; +} + +inline bool 
G1CollectorState::is_concurrent_cycle_pause(Pause type) { + return type == Pause::Cleanup || type == Pause::Remark; +} + +#endif // SHARE_GC_G1_G1COLLECTORSTATE_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index ec5649f4fe2..dbb5ba509a2 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -30,8 +30,7 @@ #include "gc/g1/g1CardSetMemory.hpp" #include "gc/g1/g1CardTableClaimTable.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1CollectionSetChooser.hpp" -#include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMark.inline.hpp" #include "gc/g1/g1ConcurrentMarkRemarkTasks.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" @@ -373,63 +372,56 @@ void G1CMMarkStack::set_empty() { G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) : _root_regions(MemRegion::create_array(max_regions, mtGC)), _max_regions(max_regions), - _num_root_regions(0), - _claimed_root_regions(0), - _scan_in_progress(false), - _should_abort(false) { } + _num_regions(0), + _num_claimed_regions(0) { } G1CMRootMemRegions::~G1CMRootMemRegions() { MemRegion::destroy_array(_root_regions, _max_regions); } void G1CMRootMemRegions::reset() { - _num_root_regions.store_relaxed(0); + assert_at_safepoint(); + assert(G1CollectedHeap::heap()->collector_state()->is_in_concurrent_start_gc(), "must be"); + + _num_regions.store_relaxed(0); + _num_claimed_regions.store_relaxed(0); } void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) { assert_at_safepoint(); - size_t idx = _num_root_regions.fetch_then_add(1u); - assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions); + uint idx = _num_regions.fetch_then_add(1u); + assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %u", _max_regions); assert(start != nullptr && 
end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " "end (" PTR_FORMAT ")", p2i(start), p2i(end)); _root_regions[idx].set_start(start); _root_regions[idx].set_end(end); } -void G1CMRootMemRegions::prepare_for_scan() { - assert(!scan_in_progress(), "pre-condition"); - - _scan_in_progress.store_relaxed(num_root_regions() > 0); - - _claimed_root_regions.store_relaxed(0); - _should_abort.store_relaxed(false); -} - const MemRegion* G1CMRootMemRegions::claim_next() { - if (_should_abort.load_relaxed()) { - // If someone has set the should_abort flag, we return null to - // force the caller to bail out of their loop. + uint local_num_regions = num_regions(); + if (num_claimed_regions() >= local_num_regions) { return nullptr; } - uint local_num_root_regions = num_root_regions(); - if (_claimed_root_regions.load_relaxed() >= local_num_root_regions) { - return nullptr; - } - - size_t claimed_index = _claimed_root_regions.fetch_then_add(1u); - if (claimed_index < local_num_root_regions) { + uint claimed_index = _num_claimed_regions.fetch_then_add(1u); + if (claimed_index < local_num_regions) { return &_root_regions[claimed_index]; } return nullptr; } -uint G1CMRootMemRegions::num_root_regions() const { - return (uint)_num_root_regions.load_relaxed(); +bool G1CMRootMemRegions::work_completed() const { + return num_remaining_regions() == 0; +} + +uint G1CMRootMemRegions::num_remaining_regions() const { + uint total = num_regions(); + uint claimed = num_claimed_regions(); + return (total > claimed) ? 
total - claimed : 0; } bool G1CMRootMemRegions::contains(const MemRegion mr) const { - uint local_num_root_regions = num_root_regions(); + uint local_num_root_regions = num_regions(); for (uint i = 0; i < local_num_root_regions; i++) { if (_root_regions[i].equals(mr)) { return true; @@ -438,42 +430,6 @@ bool G1CMRootMemRegions::contains(const MemRegion mr) const { return false; } -void G1CMRootMemRegions::notify_scan_done() { - MutexLocker x(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag); - _scan_in_progress.store_relaxed(false); - G1RootRegionScan_lock->notify_all(); -} - -void G1CMRootMemRegions::cancel_scan() { - notify_scan_done(); -} - -void G1CMRootMemRegions::scan_finished() { - assert(scan_in_progress(), "pre-condition"); - - if (!_should_abort.load_relaxed()) { - assert(_claimed_root_regions.load_relaxed() >= num_root_regions(), - "we should have claimed all root regions, claimed %zu, length = %u", - _claimed_root_regions.load_relaxed(), num_root_regions()); - } - - notify_scan_done(); -} - -bool G1CMRootMemRegions::wait_until_scan_finished() { - if (!scan_in_progress()) { - return false; - } - - { - MonitorLocker ml(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag); - while (scan_in_progress()) { - ml.wait(); - } - } - return true; -} - G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* bitmap_storage) : _cm_thread(nullptr), @@ -484,6 +440,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, _heap(_g1h->reserved()), _root_regions(_g1h->max_num_regions()), + _root_region_scan_aborted(false), _global_mark_stack(), @@ -573,14 +530,31 @@ void G1ConcurrentMark::fully_initialize() { reset_at_marking_complete(); } -bool G1ConcurrentMark::in_progress() const { - return is_fully_initialized() ? _cm_thread->in_progress() : false; +bool G1ConcurrentMark::is_in_concurrent_cycle() const { + return is_fully_initialized() ? 
_cm_thread->is_in_progress() : false; +} + +bool G1ConcurrentMark::is_in_marking() const { + return is_fully_initialized() ? cm_thread()->is_in_marking() : false; +} + +bool G1ConcurrentMark::is_in_rebuild_or_scrub() const { + return cm_thread()->is_in_rebuild_or_scrub(); +} + +bool G1ConcurrentMark::is_in_reset_for_next_cycle() const { + return cm_thread()->is_in_reset_for_next_cycle(); } PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const { return _partial_array_state_manager; } +G1ConcurrentMarkThread* G1ConcurrentMark::cm_thread() const { + assert(is_fully_initialized(), "must be"); + return _cm_thread; +} + void G1ConcurrentMark::reset() { _has_aborted.store_relaxed(false); @@ -598,6 +572,7 @@ void G1ConcurrentMark::reset() { _region_mark_stats[i].clear(); } + _root_region_scan_aborted.store_relaxed(false); _root_regions.reset(); } @@ -617,7 +592,7 @@ void G1ConcurrentMark::humongous_object_eagerly_reclaimed(G1HeapRegion* r) { // Need to clear mark bit of the humongous object. Doing this unconditionally is fine. mark_bitmap()->clear(r->bottom()); - if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { + if (!_g1h->collector_state()->is_in_mark_or_rebuild()) { return; } @@ -677,14 +652,14 @@ void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurr #if TASKQUEUE_STATS void G1ConcurrentMark::print_and_reset_taskqueue_stats() { - _task_queues->print_and_reset_taskqueue_stats("G1ConcurrentMark Oop Queue"); + _task_queues->print_and_reset_taskqueue_stats("Concurrent Mark"); auto get_pa_stats = [&](uint i) { return _tasks[i]->partial_array_task_stats(); }; PartialArrayTaskStats::log_set(_max_num_tasks, get_pa_stats, - "G1ConcurrentMark Partial Array Task Stats"); + "Concurrent Mark Partial Array"); for (uint i = 0; i < _max_num_tasks; ++i) { get_pa_stats(i)->reset(); @@ -715,7 +690,6 @@ public: private: // Heap region closure used for clearing the _mark_bitmap. 
class G1ClearBitmapHRClosure : public G1HeapRegionClosure { - private: G1ConcurrentMark* _cm; G1CMBitMap* _bitmap; bool _suspendible; // If suspendible, do yield checks. @@ -725,7 +699,7 @@ private: } bool is_clear_concurrent_undo() { - return suspendible() && _cm->cm_thread()->in_undo_mark(); + return suspendible() && _cm->cm_thread()->is_in_undo_cycle(); } bool has_aborted() { @@ -781,8 +755,7 @@ private: // as asserts here to minimize their overhead on the product. However, we // will have them as guarantees at the beginning / end of the bitmap // clearing to get some checking in the product. - assert(!suspendible() || _cm->in_progress(), "invariant"); - assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant"); + assert(!suspendible() || _cm->is_in_reset_for_next_cycle(), "invariant"); // Abort iteration if necessary. if (has_aborted()) { @@ -813,10 +786,6 @@ public: SuspendibleThreadSetJoiner sts_join(_suspendible); G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id); } - - bool is_complete() { - return _cl.is_complete(); - } }; void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) { @@ -831,29 +800,19 @@ void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) { log_debug(gc, ergo)("Running %s with %u workers for %zu work units.", cl.name(), num_workers, num_chunks); workers->run_task(&cl, num_workers); - guarantee(may_yield || cl.is_complete(), "Must have completed iteration when not yielding."); } void G1ConcurrentMark::cleanup_for_next_mark() { // Make sure that the concurrent mark thread looks to still be in // the current cycle. - guarantee(is_fully_initialized(), "should be initializd"); - guarantee(in_progress(), "invariant"); - - // We are finishing up the current cycle by clearing the next - // marking bitmap and getting it ready for the next cycle. During - // this time no other cycle can start. 
So, let's make sure that this - // is the case. - guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant"); + guarantee(is_in_reset_for_next_cycle(), "invariant"); clear_bitmap(_concurrent_workers, true); reset_partial_array_state_manager(); - // Repeat the asserts from above. - guarantee(is_fully_initialized(), "should be initializd"); - guarantee(in_progress(), "invariant"); - guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant"); + // Should not have changed state yet (even if a Full GC interrupted us). + guarantee(is_in_reset_for_next_cycle(), "invariant"); } void G1ConcurrentMark::reset_partial_array_state_manager() { @@ -898,9 +857,26 @@ public: }; class G1PreConcurrentStartTask::NoteStartOfMarkTask : public G1AbstractSubTask { + + class NoteStartOfMarkHRClosure : public G1HeapRegionClosure { + G1ConcurrentMark* _cm; + + public: + NoteStartOfMarkHRClosure() : G1HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { } + + bool do_heap_region(G1HeapRegion* r) override { + if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) { + _cm->update_top_at_mark_start(r); + } else { + _cm->reset_top_at_mark_start(r); + } + return false; + } + } _region_cl; + G1HeapRegionClaimer _claimer; public: - NoteStartOfMarkTask() : G1AbstractSubTask(G1GCPhaseTimes::NoteStartOfMark), _claimer(0) { } + NoteStartOfMarkTask() : G1AbstractSubTask(G1GCPhaseTimes::NoteStartOfMark), _region_cl(), _claimer(0) { } double worker_cost() const override { // The work done per region is very small, therefore we choose this magic number to cap the number @@ -909,8 +885,13 @@ public: return _claimer.n_regions() / regions_per_thread; } - void set_max_workers(uint max_workers) override; - void do_work(uint worker_id) override; + void set_max_workers(uint max_workers) override { + _claimer.set_n_workers(max_workers); + } + + void do_work(uint worker_id) override { + 
G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_region_cl, &_claimer, worker_id); + } }; void G1PreConcurrentStartTask::ResetMarkingStateTask::do_work(uint worker_id) { @@ -918,31 +899,6 @@ void G1PreConcurrentStartTask::ResetMarkingStateTask::do_work(uint worker_id) { _cm->reset(); } -class NoteStartOfMarkHRClosure : public G1HeapRegionClosure { - G1ConcurrentMark* _cm; - -public: - NoteStartOfMarkHRClosure() : G1HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { } - - bool do_heap_region(G1HeapRegion* r) override { - if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) { - _cm->update_top_at_mark_start(r); - } else { - _cm->reset_top_at_mark_start(r); - } - return false; - } -}; - -void G1PreConcurrentStartTask::NoteStartOfMarkTask::do_work(uint worker_id) { - NoteStartOfMarkHRClosure start_cl; - G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&start_cl, &_claimer, worker_id); -} - -void G1PreConcurrentStartTask::NoteStartOfMarkTask::set_max_workers(uint max_workers) { - _claimer.set_n_workers(max_workers); -} - G1PreConcurrentStartTask::G1PreConcurrentStartTask(GCCause::Cause cause, G1ConcurrentMark* cm) : G1BatchedTask("Pre Concurrent Start", G1CollectedHeap::heap()->phase_times()) { add_serial_task(new ResetMarkingStateTask(cm)); @@ -962,8 +918,7 @@ void G1ConcurrentMark::pre_concurrent_start(GCCause::Cause cause) { _gc_tracer_cm->set_gc_cause(cause); } - -void G1ConcurrentMark::post_concurrent_mark_start() { +void G1ConcurrentMark::start_full_concurrent_cycle() { // Start Concurrent Marking weak-reference discovery. 
ReferenceProcessor* rp = _g1h->ref_processor_cm(); rp->start_discovery(false /* always_clear */); @@ -974,16 +929,34 @@ void G1ConcurrentMark::post_concurrent_mark_start() { satb_mq_set.set_active_all_threads(true, /* new active value */ false /* expected_active */); - _root_regions.prepare_for_scan(); - // update_g1_committed() will be called at the end of an evac pause // when marking is on. So, it's also called at the end of the // concurrent start pause to update the heap end, if the heap expands // during it. No need to call it here. + + // Signal the thread to start work. + cm_thread()->start_full_cycle(); } -void G1ConcurrentMark::post_concurrent_undo_start() { - root_regions()->cancel_scan(); +void G1ConcurrentMark::start_undo_concurrent_cycle() { + assert_at_safepoint_on_vm_thread(); + // At this time this GC is not a concurrent start gc any more, can only check for young only gc/phase. + assert(_g1h->collector_state()->is_in_young_only_phase(), "must be"); + + abort_root_region_scan_at_safepoint(); + + // Signal the thread to start work. 
+ cm_thread()->start_undo_cycle(); +} + +void G1ConcurrentMark::notify_concurrent_cycle_completed() { + cm_thread()->set_idle(); +} + +void G1ConcurrentMark::stop() { + if (is_fully_initialized()) { + cm_thread()->stop(); + } } /* @@ -1082,6 +1055,16 @@ uint G1ConcurrentMark::calc_active_marking_workers() { return result; } +bool G1ConcurrentMark::has_root_region_scan_aborted() const { + return _root_region_scan_aborted.load_relaxed(); +} + +#ifndef PRODUCT +void G1ConcurrentMark::assert_root_region_scan_completed_or_aborted() { + assert(root_regions()->work_completed() || has_root_region_scan_aborted(), "must be"); +} +#endif + void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) { #ifdef ASSERT HeapWord* last = region->last(); @@ -1108,45 +1091,87 @@ void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) class G1CMRootRegionScanTask : public WorkerTask { G1ConcurrentMark* _cm; + bool _should_yield; + public: - G1CMRootRegionScanTask(G1ConcurrentMark* cm) : - WorkerTask("G1 Root Region Scan"), _cm(cm) { } + G1CMRootRegionScanTask(G1ConcurrentMark* cm, bool should_yield) : + WorkerTask("G1 Root Region Scan"), _cm(cm), _should_yield(should_yield) { } void work(uint worker_id) { - G1CMRootMemRegions* root_regions = _cm->root_regions(); - const MemRegion* region = root_regions->claim_next(); - while (region != nullptr) { + SuspendibleThreadSetJoiner sts_join(_should_yield); + + while (true) { + if (_cm->has_root_region_scan_aborted()) { + return; + } + G1CMRootMemRegions* root_regions = _cm->root_regions(); + const MemRegion* region = root_regions->claim_next(); + if (region == nullptr) { + return; + } _cm->scan_root_region(region, worker_id); - region = root_regions->claim_next(); + if (_should_yield) { + SuspendibleThreadSet::yield(); + // If we yielded, a GC may have processed all root regions, + // so this loop will naturally exit on the next claim_next() call. 
+ // Same if a Full GC signalled abort of the concurrent mark. + } } } }; -void G1ConcurrentMark::scan_root_regions() { - // scan_in_progress() will have been set to true only if there was - // at least one root region to scan. So, if it's false, we - // should not attempt to do any further work. - if (root_regions()->scan_in_progress()) { - assert(!has_aborted(), "Aborting before root region scanning is finished not supported."); - +bool G1ConcurrentMark::scan_root_regions(WorkerThreads* workers, bool concurrent) { + // We first check whether there is any work to do as we might have already aborted + // the concurrent cycle, or ran into a GC that did the actual work when we reach here. + // We want to avoid spinning up the worker threads if that happened. + // (Note that due to races reading the abort-flag, we might spin up the threads anyway). + // + // Abort happens if a Full GC occurs right after starting the concurrent cycle or + // a young gc doing the work. + // + // Concurrent gc threads enter an STS when starting the task, so they stop, then + // continue after that safepoint. + // + // Must not use G1CMRootMemRegions::work_completed() here because we need to get a + // consistent view of the value containing the number of remaining regions across the + // usages below. The safepoint/gc may already be running and modifying it + // while this code is still executing. + uint num_remaining = root_regions()->num_remaining_regions(); + bool do_scan = num_remaining > 0 && !has_root_region_scan_aborted(); + if (do_scan) { // Assign one worker to each root-region but subject to the max constraint. - const uint num_workers = MIN2(root_regions()->num_root_regions(), + // The constraint is also important to avoid accesses beyond the allocated per-worker + // marking helper data structures. We might get passed different WorkerThreads with + // different number of threads (potential worker ids) than helper data structures when + // completing this work during GC. 
+ const uint num_workers = MIN2(num_remaining, _max_concurrent_workers); + assert(num_workers > 0, "no more remaining root regions to process"); - G1CMRootRegionScanTask task(this); + G1CMRootRegionScanTask task(this, concurrent); log_debug(gc, ergo)("Running %s using %u workers for %u work units.", - task.name(), num_workers, root_regions()->num_root_regions()); - _concurrent_workers->run_task(&task, num_workers); - - // It's possible that has_aborted() is true here without actually - // aborting the survivor scan earlier. This is OK as it's - // mainly used for sanity checking. - root_regions()->scan_finished(); + task.name(), num_workers, num_remaining); + workers->run_task(&task, num_workers); } + + // At the end of this method, we can re-read num_remaining() in the assert: either + // we got non-zero above and we processed all root regions (and it must be zero + // after the worker task synchronization) or it had already been zero. We also + // can't have started another concurrent cycle that could have set it to something else + // while still in the concurrent cycle (if called concurrently). 
+ assert_root_region_scan_completed_or_aborted(); + + return do_scan; } -bool G1ConcurrentMark::wait_until_root_region_scan_finished() { - return root_regions()->wait_until_scan_finished(); +void G1ConcurrentMark::scan_root_regions_concurrently() { + assert(Thread::current() == cm_thread(), "must be on Concurrent Mark Thread"); + scan_root_regions(_concurrent_workers, true /* concurrent */); +} + +bool G1ConcurrentMark::complete_root_regions_scan_in_safepoint() { + assert_at_safepoint_on_vm_thread(); + return scan_root_regions(_g1h->workers(), false /* concurrent */); } void G1ConcurrentMark::add_root_region(G1HeapRegion* r) { @@ -1157,9 +1182,16 @@ bool G1ConcurrentMark::is_root_region(G1HeapRegion* r) { return root_regions()->contains(MemRegion(top_at_mark_start(r), r->top())); } -void G1ConcurrentMark::root_region_scan_abort_and_wait() { - root_regions()->abort(); - root_regions()->wait_until_scan_finished(); +void G1ConcurrentMark::abort_root_region_scan() { + assert_not_at_safepoint(); + + _root_region_scan_aborted.store_relaxed(true); +} + +void G1ConcurrentMark::abort_root_region_scan_at_safepoint() { + assert_at_safepoint_on_vm_thread(); + + _root_region_scan_aborted.store_relaxed(true); } void G1ConcurrentMark::concurrent_cycle_start() { @@ -1175,8 +1207,6 @@ uint G1ConcurrentMark::completed_mark_cycles() const { } void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) { - _g1h->collector_state()->set_clear_bitmap_in_progress(false); - _g1h->trace_heap_after_gc(_gc_tracer_cm); if (mark_cycle_completed) { @@ -1317,14 +1347,13 @@ void G1ConcurrentMark::remark() { _g1h->workers()->run_task(&cl, num_workers); log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u", - _g1h->num_committed_regions(), cl.total_selected_for_rebuild()); + _g1h->num_committed_regions(), cl.total_selected_for_rebuild()); _needs_remembered_set_rebuild = (cl.total_selected_for_rebuild() > 0); if (_needs_remembered_set_rebuild) { - 
// Prune rebuild candidates based on G1HeapWastePercent. - // Improves rebuild time in addition to remembered set memory usage. - G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_committed_regions(), _g1h->policy()->candidates()); + GrowableArrayCHeap* selected = cl.sort_and_prune_old_selected(); + _g1h->policy()->candidates()->set_candidates_from_marking(selected); } } @@ -1361,6 +1390,9 @@ void G1ConcurrentMark::remark() { G1ObjectCountIsAliveClosure is_alive(_g1h); _gc_tracer_cm->report_object_count_after_gc(&is_alive, _g1h->workers()); } + + // Successfully completed marking, advance state. + cm_thread()->set_full_cycle_rebuild_and_scrub(); } else { // We overflowed. Restart concurrent marking. _restart_for_overflow.store_relaxed(true); @@ -1381,6 +1413,8 @@ void G1ConcurrentMark::remark() { _g1h->update_perf_counter_cpu_time(); policy->record_concurrent_mark_remark_end(); + + return; } void G1ConcurrentMark::compute_new_sizes() { @@ -1443,6 +1477,10 @@ void G1ConcurrentMark::cleanup() { GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm); policy->record_concurrent_mark_cleanup_end(needs_remembered_set_rebuild()); } + + // Advance state. + cm_thread()->set_full_cycle_reset_for_next_cycle(); + return; } // 'Keep Alive' oop closure used by both serial parallel reference processing. 
@@ -1869,7 +1907,7 @@ public: void G1ConcurrentMark::verify_no_collection_set_oops() { assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), "should be at a safepoint or initializing"); - if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { + if (!is_fully_initialized() || !_g1h->collector_state()->is_in_mark_or_rebuild()) { return; } @@ -1930,12 +1968,15 @@ void G1ConcurrentMark::print_stats() { } bool G1ConcurrentMark::concurrent_cycle_abort() { + assert_at_safepoint_on_vm_thread(); + assert(_g1h->collector_state()->is_in_full_gc(), "must be"); + // If we start the compaction before the CM threads finish // scanning the root regions we might trip them over as we'll - // be moving objects / updating references. So let's wait until - // they are done. By telling them to abort, they should complete - // early. - root_region_scan_abort_and_wait(); + // be moving objects / updating references. Since the root region + // scan synchronized with the safepoint, just tell it to abort. + // It will notice when the threads start up again later. + abort_root_region_scan_at_safepoint(); // We haven't started a concurrent cycle no need to do anything; we might have // aborted the marking because of shutting down though. In this case the marking @@ -1947,7 +1988,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() { // has been signalled is already rare), and this work should be negligible compared // to actual full gc work. 
- if (!is_fully_initialized() || (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating())) { + if (!is_fully_initialized() || (!cm_thread()->is_in_progress() && !cm_thread()->should_terminate())) { return false; } @@ -1965,7 +2006,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() { } void G1ConcurrentMark::abort_marking_threads() { - assert(!_root_regions.scan_in_progress(), "still doing root region scan"); + assert_root_region_scan_completed_or_aborted(); _has_aborted.store_relaxed(true); _first_overflow_barrier_sync.abort(); _second_overflow_barrier_sync.abort(); @@ -2132,8 +2173,7 @@ void G1CMTask::reset_for_restart() { void G1CMTask::register_partial_array_splitter() { ::new (&_partial_array_splitter) PartialArraySplitter(_cm->partial_array_state_manager(), - _cm->max_num_tasks(), - ObjArrayMarkingStride); + _cm->max_num_tasks()); } void G1CMTask::unregister_partial_array_splitter() { @@ -2311,20 +2351,16 @@ void G1CMTask::drain_local_queue(bool partially) { } } -size_t G1CMTask::start_partial_array_processing(oop obj) { - assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size()); - - objArrayOop obj_array = objArrayOop(obj); - size_t array_length = obj_array->length(); - - size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length); +size_t G1CMTask::start_partial_array_processing(objArrayOop obj) { + assert(obj->length() >= (int)ObjArrayMarkingStride, "Must be a large array object %d", obj->length()); // Mark objArray klass metadata - if (_cm_oop_closure->do_metadata()) { - _cm_oop_closure->do_klass(obj_array->klass()); - } + process_klass(obj->klass()); - process_array_chunk(obj_array, 0, initial_chunk_size); + size_t array_length = obj->length(); + size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj, nullptr, array_length, ObjArrayMarkingStride); + + process_array_chunk(obj, 0, initial_chunk_size); // Include object header 
size return objArrayOopDesc::object_size(checked_cast(initial_chunk_size)); @@ -2880,7 +2916,7 @@ G1CMTask::G1CMTask(uint worker_id, _cm(cm), _mark_bitmap(nullptr), _task_queue(task_queue), - _partial_array_splitter(_cm->partial_array_state_manager(), _cm->max_num_tasks(), ObjArrayMarkingStride), + _partial_array_splitter(_cm->partial_array_state_manager(), _cm->max_num_tasks()), _mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize), _calls(0), _time_target_ms(0.0), diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp index 11da6dae5b3..f9287f673d2 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp @@ -288,57 +288,36 @@ private: class G1CMRootMemRegions { // The set of root MemRegions. MemRegion* _root_regions; - size_t const _max_regions; + uint const _max_regions; - Atomic _num_root_regions; // Actual number of root regions. + Atomic _num_regions; // Actual number of root regions. + Atomic _num_claimed_regions; // Number of root regions currently claimed. - Atomic _claimed_root_regions; // Number of root regions currently claimed. - - Atomic _scan_in_progress; - Atomic _should_abort; - - void notify_scan_done(); + uint num_regions() const { return _num_regions.load_relaxed(); } + uint num_claimed_regions() const { return _num_claimed_regions.load_relaxed(); } public: G1CMRootMemRegions(uint const max_regions); ~G1CMRootMemRegions(); - // Reset the data structure to allow addition of new root regions. - void reset(); - void add(HeapWord* start, HeapWord* end); - // Reset the claiming / scanning of the root regions. - void prepare_for_scan(); - - // Forces get_next() to return null so that the iteration aborts early. - void abort() { _should_abort.store_relaxed(true); } - - // Return true if the CM thread are actively scanning root regions, - // false otherwise. 
- bool scan_in_progress() { return _scan_in_progress.load_relaxed(); } + // Reset data structure to initial state. + void reset(); // Claim the next root MemRegion to scan atomically, or return null if // all have been claimed. const MemRegion* claim_next(); - // The number of root regions to scan. - uint num_root_regions() const; + // Number of root regions to still process. + uint num_remaining_regions() const; + + // Returns whether all root regions have been processed or the processing been aborted. + bool work_completed() const; // Is the given memregion contained in the root regions; the MemRegion must // match exactly. bool contains(const MemRegion mr) const; - - void cancel_scan(); - - // Flag that we're done with root region scanning and notify anyone - // who's waiting on it. If aborted is false, assume that all regions - // have been claimed. - void scan_finished(); - - // If CM threads are still scanning root regions, wait until they - // are done. Return true if we had to wait, false otherwise. - bool wait_until_scan_finished(); }; // This class manages data structures and methods for doing liveness analysis in @@ -352,6 +331,8 @@ class G1ConcurrentMark : public CHeapObj { friend class G1CMRemarkTask; friend class G1CMRootRegionScanTask; friend class G1CMTask; + friend class G1ClearBitMapTask; + friend class G1CollectorState; friend class G1ConcurrentMarkThread; G1ConcurrentMarkThread* _cm_thread; // The thread doing the work @@ -365,6 +346,7 @@ class G1ConcurrentMark : public CHeapObj { // Root region tracking and claiming G1CMRootMemRegions _root_regions; + Atomic _root_region_scan_aborted; // For grey objects G1CMMarkStack _global_mark_stack; // Grey objects behind global finger @@ -524,6 +506,15 @@ class G1ConcurrentMark : public CHeapObj { Atomic* _top_at_rebuild_starts; // True when Remark pause selected regions for rebuilding. 
bool _needs_remembered_set_rebuild; + + G1ConcurrentMarkThread* cm_thread() const; + + // Concurrent cycle state queries. + bool is_in_concurrent_cycle() const; + bool is_in_marking() const; + bool is_in_rebuild_or_scrub() const; + bool is_in_reset_for_next_cycle() const; + public: // To be called when an object is marked the first time, e.g. after a successful // mark_in_bitmap call. Updates various statistics data. @@ -557,7 +548,7 @@ public: void fully_initialize(); bool is_fully_initialized() const { return _cm_thread != nullptr; } - bool in_progress() const; + uint max_num_tasks() const {return _max_num_tasks; } // Clear statistics gathered during the concurrent cycle for the given region after @@ -589,7 +580,7 @@ public: // Notifies marking threads to abort. This is a best-effort notification. Does not // guarantee or update any state after the call. Root region scan must not be - // running. + // running or being aborted. void abort_marking_threads(); // Total cpu time spent in mark worker threads in seconds. @@ -602,8 +593,6 @@ public: G1RegionToSpaceMapper* bitmap_storage); ~G1ConcurrentMark(); - G1ConcurrentMarkThread* cm_thread() { return _cm_thread; } - G1CMBitMap* mark_bitmap() const { return (G1CMBitMap*)&_mark_bitmap; } // Calculates the number of concurrent GC threads to be used in the marking phase. @@ -632,20 +621,40 @@ public: // These two methods do the work that needs to be done at the start and end of the // concurrent start pause. void pre_concurrent_start(GCCause::Cause cause); - void post_concurrent_mark_start(); - void post_concurrent_undo_start(); - // Scan all the root regions and mark everything reachable from - // them. - void scan_root_regions(); - bool wait_until_root_region_scan_finished(); + // Start the particular type of concurrent cycle. After this call threads may be running. 
+ void start_full_concurrent_cycle(); + void start_undo_concurrent_cycle(); + + void notify_concurrent_cycle_completed(); + + // Stop active components/the concurrent mark thread. + void stop(); + void add_root_region(G1HeapRegion* r); bool is_root_region(G1HeapRegion* r); - void root_region_scan_abort_and_wait(); + + // Scan all the root regions concurrently and mark everything reachable from + // them. + void scan_root_regions_concurrently(); + // Complete root region scan work in the safepoint, return if we did some work. + bool complete_root_regions_scan_in_safepoint(); + + // Abort an active concurrent root region scan outside safepoint. + void abort_root_region_scan(); + + bool has_root_region_scan_aborted() const; private: + // Abort an active concurrent root region scan during safepoint. + void abort_root_region_scan_at_safepoint(); + + void assert_root_region_scan_completed_or_aborted() PRODUCT_RETURN; G1CMRootMemRegions* root_regions() { return &_root_regions; } + // Perform root region scan until all root regions have been processed, or + // the process has been aborted. Returns true if we did some work. + bool scan_root_regions(WorkerThreads* workers, bool concurrent); // Scan a single root MemRegion to mark everything reachable from it. void scan_root_region(const MemRegion* region, uint worker_id); @@ -657,8 +666,10 @@ public: // Do concurrent preclean work. void preclean(); + // Executes the Remark pause. void remark(); + // Executes the Cleanup pause. void cleanup(); // Mark in the marking bitmap. Used during evacuation failure to @@ -833,12 +844,10 @@ private: // mark bitmap scan, and so needs to be pushed onto the mark stack. bool is_below_finger(oop obj, HeapWord* global_finger) const; - template void process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen); - static bool should_be_sliced(oop obj); // Start processing the given objArrayOop by first pushing its continuations and // then scanning the first chunk including the header. 
- size_t start_partial_array_processing(oop obj); + size_t start_partial_array_processing(objArrayOop obj); // Process the given continuation. Returns the number of words scanned. size_t process_partial_array(const G1TaskQueueEntry& task, bool stolen); // Apply the closure to the given range of elements in the objArray. @@ -907,6 +916,9 @@ public: template inline bool deal_with_reference(T* p); + // Scan the klass and visit its children. + inline void process_klass(Klass* klass); + // Scans an object and visits its children. inline void process_entry(G1TaskQueueEntry task_entry, bool stolen); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp index 21167d5cae9..094f4dca994 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp @@ -106,9 +106,27 @@ inline void G1CMMarkStack::iterate(Fn fn) const { } #endif +inline void G1CMTask::process_klass(Klass* klass) { + _cm_oop_closure->do_klass(klass); +} + // It scans an object and visits its children. 
inline void G1CMTask::process_entry(G1TaskQueueEntry task_entry, bool stolen) { - process_grey_task_entry(task_entry, stolen); + assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop(task_entry.to_oop())), + "Any stolen object should be a slice or marked"); + + if (task_entry.is_partial_array_state()) { + _words_scanned += process_partial_array(task_entry, stolen); + } else { + oop obj = task_entry.to_oop(); + if (should_be_sliced(obj)) { + _words_scanned += start_partial_array_processing(objArrayOop(obj)); + } else { + _words_scanned += obj->oop_iterate_size(_cm_oop_closure); + } + } + + check_limits(); } inline void G1CMTask::push(G1TaskQueueEntry task_entry) { @@ -160,27 +178,6 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const { return objAddr < global_finger; } -template -inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen) { - assert(scan || (!task_entry.is_partial_array_state() && task_entry.to_oop()->is_typeArray()), "Skipping scan of grey non-typeArray"); - assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop(task_entry.to_oop())), - "Any stolen object should be a slice or marked"); - - if (scan) { - if (task_entry.is_partial_array_state()) { - _words_scanned += process_partial_array(task_entry, stolen); - } else { - oop obj = task_entry.to_oop(); - if (should_be_sliced(obj)) { - _words_scanned += start_partial_array_processing(obj); - } else { - _words_scanned += obj->oop_iterate_size(_cm_oop_closure); - } - } - } - check_limits(); -} - inline bool G1CMTask::should_be_sliced(oop obj) { return obj->is_objArray() && ((objArrayOop)obj)->length() >= (int)ObjArrayMarkingStride; } @@ -272,7 +269,6 @@ inline bool G1CMTask::make_reference_grey(oop obj) { // be pushed on the stack. So, some duplicate work, but no // correctness problems. 
if (is_below_finger(obj, global_finger)) { - G1TaskQueueEntry entry(obj); if (obj->is_typeArray()) { // Immediately process arrays of primitive types, rather // than pushing on the mark stack. This keeps us from @@ -284,8 +280,8 @@ inline bool G1CMTask::make_reference_grey(oop obj) { // by only doing a bookkeeping update and avoiding the // actual scan of the object - a typeArray contains no // references, and the metadata is built-in. - process_grey_task_entry(entry, false /* stolen */); } else { + G1TaskQueueEntry entry(obj); push(entry); } } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp index 4eb11f6d8f6..3eda7200e25 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp @@ -31,28 +31,32 @@ #include "gc/g1/g1RemSetTrackingPolicy.hpp" #include "logging/log.hpp" #include "runtime/mutexLocker.hpp" +#include "utilities/growableArray.hpp" struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure { G1CollectedHeap* _g1h; G1ConcurrentMark* _cm; - // The number of regions actually selected for rebuild. 
- uint _num_selected_for_rebuild; size_t _freed_bytes; uint _num_old_regions_removed; uint _num_humongous_regions_removed; - G1FreeRegionList* _local_cleanup_list; + + GrowableArrayCHeap _old_selected_for_rebuild; + uint _num_humongous_selected_for_rebuild; + + G1FreeRegionList* _cleanup_list; G1OnRegionClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1FreeRegionList* local_cleanup_list) : _g1h(g1h), _cm(cm), - _num_selected_for_rebuild(0), _freed_bytes(0), _num_old_regions_removed(0), _num_humongous_regions_removed(0), - _local_cleanup_list(local_cleanup_list) {} + _old_selected_for_rebuild(16), + _num_humongous_selected_for_rebuild(0), + _cleanup_list(local_cleanup_list) {} void reclaim_empty_region_common(G1HeapRegion* hr) { assert(!hr->has_pinned_objects(), "precondition"); @@ -74,7 +78,7 @@ struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public _num_humongous_regions_removed++; reclaim_empty_region_common(hr); - _g1h->free_humongous_region(hr, _local_cleanup_list); + _g1h->free_humongous_region(hr, _cleanup_list); }; _g1h->humongous_obj_regions_iterate(hr, on_humongous_region); @@ -85,7 +89,7 @@ struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public _num_old_regions_removed++; reclaim_empty_region_common(hr); - _g1h->free_region(hr, _local_cleanup_list); + _g1h->free_region(hr, _cleanup_list); } bool do_heap_region(G1HeapRegion* hr) override { @@ -98,13 +102,13 @@ struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public || hr->has_pinned_objects(); if (is_live) { const bool selected_for_rebuild = tracker->update_humongous_before_rebuild(hr); + auto on_humongous_region = [&] (G1HeapRegion* hr) { if (selected_for_rebuild) { - _num_selected_for_rebuild++; + _num_humongous_selected_for_rebuild++; } _cm->update_top_at_rebuild_start(hr); }; - _g1h->humongous_obj_regions_iterate(hr, on_humongous_region); } else { reclaim_empty_humongous_region(hr); @@ -118,7 +122,7 @@ struct 
G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public if (is_live) { const bool selected_for_rebuild = tracker->update_old_before_rebuild(hr); if (selected_for_rebuild) { - _num_selected_for_rebuild++; + _old_selected_for_rebuild.push(hr); } _cm->update_top_at_rebuild_start(hr); } else { @@ -137,7 +141,8 @@ G1UpdateRegionLivenessAndSelectForRebuildTask::G1UpdateRegionLivenessAndSelectFo _g1h(g1h), _cm(cm), _hrclaimer(num_workers), - _total_selected_for_rebuild(0), + _old_selected_for_rebuild(128), + _num_humongous_selected_for_rebuild(0), _cleanup_list("Empty Regions After Mark List") {} G1UpdateRegionLivenessAndSelectForRebuildTask::~G1UpdateRegionLivenessAndSelectForRebuildTask() { @@ -153,8 +158,6 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) { G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list); _g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id); - _total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild); - // Update the old/humongous region sets _g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed, on_region_cl._num_humongous_regions_removed); @@ -163,6 +166,9 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) { MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag); _g1h->decrement_summary_bytes(on_region_cl._freed_bytes); + _old_selected_for_rebuild.appendAll(&on_region_cl._old_selected_for_rebuild); + _num_humongous_selected_for_rebuild += on_region_cl._num_humongous_selected_for_rebuild; + _cleanup_list.add_ordered(&local_cleanup_list); assert(local_cleanup_list.is_empty(), "post-condition"); } @@ -172,3 +178,78 @@ uint G1UpdateRegionLivenessAndSelectForRebuildTask::desired_num_workers(uint num const uint num_regions_per_worker = 384; return (num_regions + num_regions_per_worker - 1) / num_regions_per_worker; } + +// Early prune (remove) regions meeting the G1HeapWastePercent 
criteria. That +// is, either until only the minimum amount of old collection set regions are +// available (for forward progress in evacuation) or the waste accumulated by the +// removed regions is above the maximum allowed waste. +// Updates number of candidates and reclaimable bytes given. +void G1UpdateRegionLivenessAndSelectForRebuildTask::prune(GrowableArrayCHeap* old_regions) { + G1Policy* p = G1CollectedHeap::heap()->policy(); + + uint num_candidates = (uint)old_regions->length(); + + uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates); + uint num_pruned = 0; + size_t wasted_bytes = 0; + + if (min_old_cset_length >= num_candidates) { + // We take all of the candidate regions to provide some forward progress. + return; + } + + size_t allowed_waste = p->allowed_waste_in_collection_set(); + uint max_to_prune = num_candidates - min_old_cset_length; + + while (true) { + G1HeapRegion* r = old_regions->at(num_candidates - num_pruned - 1); + size_t const reclaimable = r->reclaimable_bytes(); + if (num_pruned >= max_to_prune || + wasted_bytes + reclaimable > allowed_waste) { + break; + } + r->rem_set()->clear(true /* cardset_only */); + + wasted_bytes += reclaimable; + num_pruned++; + } + + log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving %zu bytes waste (allowed %zu)", + num_pruned, + num_candidates, + wasted_bytes, + allowed_waste); + + old_regions->trunc_to(num_candidates - num_pruned); +} + +static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) { + G1HeapRegion* r1 = *rr1; + G1HeapRegion* r2 = *rr2; + + assert(r1 != nullptr, "must be"); + assert(r2 != nullptr, "must be"); + + G1Policy* p = G1CollectedHeap::heap()->policy(); + double gc_efficiency1 = p->predict_gc_efficiency(r1); + double gc_efficiency2 = p->predict_gc_efficiency(r2); + + if (gc_efficiency1 > gc_efficiency2) { + return -1; + } else if (gc_efficiency1 < gc_efficiency2) { + return 1; + } else { + return 0; + } +} + +GrowableArrayCHeap* 
G1UpdateRegionLivenessAndSelectForRebuildTask::sort_and_prune_old_selected() { + // Nothing to do for the humongous candidates here. Old selected need to be pruned. + + if (_old_selected_for_rebuild.length() != 0) { + _old_selected_for_rebuild.sort(compare_region_gc_efficiency); + prune(&_old_selected_for_rebuild); + } + + return &_old_selected_for_rebuild; +} diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp index a256693ff1d..6905419e2cc 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp @@ -29,7 +29,7 @@ #include "gc/g1/g1HeapRegionManager.hpp" #include "gc/g1/g1HeapRegionSet.hpp" #include "gc/shared/workerThread.hpp" -#include "runtime/atomic.hpp" +#include "utilities/growableArray.hpp" class G1CollectedHeap; class G1ConcurrentMark; @@ -42,13 +42,15 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask { G1ConcurrentMark* _cm; G1HeapRegionClaimer _hrclaimer; - Atomic _total_selected_for_rebuild; + GrowableArrayCHeap _old_selected_for_rebuild; + uint _num_humongous_selected_for_rebuild; // Reclaimed empty regions G1FreeRegionList _cleanup_list; struct G1OnRegionClosure; + void prune(GrowableArrayCHeap* old_regions); public: G1UpdateRegionLivenessAndSelectForRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, @@ -59,9 +61,14 @@ public: void work(uint worker_id) override; uint total_selected_for_rebuild() const { - return _total_selected_for_rebuild.load_relaxed(); + return (uint)_old_selected_for_rebuild.length() + _num_humongous_selected_for_rebuild; } + // Sort selected old regions by efficiency and prune them based on G1HeapWastePercent. + // This pruning improves rebuild time in addition to remembered set memory usage. + // Returns the set of regions selected in efficiency order. 
+ GrowableArrayCHeap* sort_and_prune_old_selected(); + static uint desired_num_workers(uint num_regions); }; diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp index 629cbae935e..b8c97acd1b0 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -112,33 +112,30 @@ class G1ConcPhaseTimer : public GCTraceConcTimeImpl title("Concurrent %s Cycle", _state == FullMark ? "Mark" : "Undo"); + FormatBuffer<128> title("Concurrent %s Cycle", is_in_full_concurrent_cycle() ? "Mark" : "Undo"); GCTraceConcTime(Info, gc) tt(title); concurrent_cycle_start(); - if (_state == FullMark) { + if (_state == FullCycleMarking) { concurrent_mark_cycle_do(); } else { - assert(_state == UndoMark, "Must do undo mark but is %d", _state); + assert(_state == UndoCycleResetForNextCycle, "Must do undo mark but is %d", _state); concurrent_undo_cycle_do(); } - concurrent_cycle_end(_state == FullMark && !_cm->has_aborted()); + concurrent_cycle_end(is_in_full_concurrent_cycle() && !_cm->has_aborted()); update_perf_counter_cpu_time(); } - _cm->root_regions()->cancel_scan(); } void G1ConcurrentMarkThread::stop_service() { - if (in_progress()) { - // We are not allowed to abort the marking threads during root region scan. - // Needs to be done separately. 
- _cm->root_region_scan_abort_and_wait(); + if (is_in_progress()) { + _cm->abort_root_region_scan(); _cm->abort_marking_threads(); } @@ -149,7 +146,7 @@ void G1ConcurrentMarkThread::stop_service() { bool G1ConcurrentMarkThread::wait_for_next_cycle() { MonitorLocker ml(G1CGC_lock, Mutex::_no_safepoint_check_flag); - while (!in_progress() && !should_terminate()) { + while (!is_in_progress() && !should_terminate()) { ml.wait(); } @@ -164,7 +161,7 @@ bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() { bool G1ConcurrentMarkThread::phase_scan_root_regions() { G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions"); - _cm->scan_root_regions(); + _cm->scan_root_regions_concurrently(); update_perf_counter_cpu_time(); return _cm->has_aborted(); } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp index 22be7d9ffbb..e75298fdcb4 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,25 +32,34 @@ class G1Policy; // The concurrent mark thread triggers the various steps of the concurrent marking // cycle, including various marking cleanup. +// +// The concurrent cycle may either be "Full" (i.e. include marking, rebuilding and +// scrubbing, resetting for the next cycle) or "Undo", i.e. shortened to just the +// reset part. 
class G1ConcurrentMarkThread: public ConcurrentGCThread { G1ConcurrentMark* _cm; enum ServiceState : uint { Idle, - FullMark, - UndoMark + FullCycleMarking, + FullCycleRebuildOrScrub, + FullCycleResetForNextCycle, + UndoCycleResetForNextCycle }; volatile ServiceState _state; + // Returns whether we are in a "Full" cycle. + bool is_in_full_concurrent_cycle() const; + // Wait for next cycle. Returns the command passed over. bool wait_for_next_cycle(); bool mark_loop_needs_restart() const; - // Phases and subphases for the full concurrent marking cycle in order. + // Phases and subphases for the full concurrent cycle in order. // - // All these methods return true if the marking should be aborted. + // All these methods return true if the cycle should be aborted. bool phase_clear_cld_claimed_marks(); bool phase_scan_root_regions(); @@ -88,22 +97,25 @@ class G1ConcurrentMarkThread: public ConcurrentGCThread { double total_mark_cpu_time_s(); // Cpu time used by all marking worker threads in seconds. double worker_threads_cpu_time_s(); - - G1ConcurrentMark* cm() { return _cm; } - + // State management. void set_idle(); - void start_full_mark(); - void start_undo_mark(); + void start_full_cycle(); + void start_undo_cycle(); - bool idle() const; + void set_full_cycle_rebuild_and_scrub(); + void set_full_cycle_reset_for_next_cycle(); + + bool is_idle() const; // Returns true from the moment a concurrent cycle is - // initiated (during the concurrent start pause when started() is set) - // to the moment when the cycle completes (just after the next - // marking bitmap has been cleared and in_progress() is - // cleared). - bool in_progress() const; + // initiated (during the concurrent start pause when calling one of the + // start_*_cycle() methods) to the moment when the cycle completes. 
+ bool is_in_progress() const; - bool in_undo_mark() const; + bool is_in_marking() const; + bool is_in_rebuild_or_scrub() const; + bool is_in_reset_for_next_cycle() const; + + bool is_in_undo_cycle() const; // Update the perf data counter for concurrent mark. void update_perf_counter_cpu_time(); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp index 254eaf62bb2..8cb7881e000 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ // Total virtual time so far. inline double G1ConcurrentMarkThread::total_mark_cpu_time_s() { - return os::thread_cpu_time(this) + worker_threads_cpu_time_s(); + return static_cast(os::thread_cpu_time(this)) + worker_threads_cpu_time_s(); } // Marking virtual time so far @@ -40,29 +40,64 @@ inline double G1ConcurrentMarkThread::worker_threads_cpu_time_s() { return _cm->worker_threads_cpu_time_s(); } +inline bool G1ConcurrentMarkThread::is_in_full_concurrent_cycle() const { + ServiceState state = _state; + return (state == FullCycleMarking || state == FullCycleRebuildOrScrub || state == FullCycleResetForNextCycle); +} + inline void G1ConcurrentMarkThread::set_idle() { - assert(_state == FullMark || _state == UndoMark, "must not be starting a new cycle"); + // Concurrent cycle may be aborted any time. 
+ assert(!is_idle(), "must not be idle"); _state = Idle; } -inline void G1ConcurrentMarkThread::start_full_mark() { - assert(_state == Idle, "cycle in progress"); - _state = FullMark; +inline void G1ConcurrentMarkThread::start_full_cycle() { + assert(SafepointSynchronize::is_at_safepoint(), "must be"); + assert(is_idle(), "cycle in progress"); + _state = FullCycleMarking; } -inline void G1ConcurrentMarkThread::start_undo_mark() { - assert(_state == Idle, "cycle in progress"); - _state = UndoMark; +inline void G1ConcurrentMarkThread::start_undo_cycle() { + assert(SafepointSynchronize::is_at_safepoint(), "must be"); + assert(is_idle(), "cycle in progress"); + _state = UndoCycleResetForNextCycle; } -inline bool G1ConcurrentMarkThread::idle() const { return _state == Idle; } - -inline bool G1ConcurrentMarkThread::in_progress() const { - return !idle(); +inline void G1ConcurrentMarkThread::set_full_cycle_rebuild_and_scrub() { + assert(SafepointSynchronize::is_at_safepoint(), "must be"); + assert(_state == FullCycleMarking, "must be"); + _state = FullCycleRebuildOrScrub; } -inline bool G1ConcurrentMarkThread::in_undo_mark() const { - return _state == UndoMark; +inline void G1ConcurrentMarkThread::set_full_cycle_reset_for_next_cycle() { + assert(SafepointSynchronize::is_at_safepoint(), "must be"); + assert(_state == FullCycleRebuildOrScrub, "must be"); + _state = FullCycleResetForNextCycle; +} + +inline bool G1ConcurrentMarkThread::is_in_marking() const { + return _state == FullCycleMarking; +} + +inline bool G1ConcurrentMarkThread::is_in_rebuild_or_scrub() const { + return _state == FullCycleRebuildOrScrub; +} + +inline bool G1ConcurrentMarkThread::is_in_reset_for_next_cycle() const { + ServiceState state = _state; + return state == FullCycleResetForNextCycle || state == UndoCycleResetForNextCycle; +} + +inline bool G1ConcurrentMarkThread::is_idle() const { + return _state == Idle; +} + +inline bool G1ConcurrentMarkThread::is_in_progress() const { + return !is_idle(); +} 
+ +inline bool G1ConcurrentMarkThread::is_in_undo_cycle() const { + return _state == UndoCycleResetForNextCycle; } #endif // SHARE_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp index 8546e6e2d64..e12a8c284de 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -326,11 +326,14 @@ bool G1ConcurrentRefineSweepState::complete_work(bool concurrent, bool print_log if (print_log) { G1ConcurrentRefineStats* s = &_stats; - log_debug(gc, refine)("Refinement took %.2fms (pre-sweep %.2fms card refine %.2f) " + State state_bounded_by_sweeprt = (_state == State::SweepRT || _state == State::CompleteRefineWork) + ? 
State::SweepRT : _state; + + log_debug(gc, refine)("Refinement took %.2fms (pre-sweep %.2fms card refine %.2fms) " "(scanned %zu clean %zu (%.2f%%) not_clean %zu (%.2f%%) not_parsable %zu " "refers_to_cset %zu (%.2f%%) still_refers_to_cset %zu (%.2f%%) no_cross_region %zu pending %zu)", get_duration(State::Idle, _state).seconds() * 1000.0, - get_duration(State::Idle, State::SweepRT).seconds() * 1000.0, + get_duration(State::Idle, state_bounded_by_sweeprt).seconds() * 1000.0, TimeHelper::counter_to_millis(s->refine_duration()), s->cards_scanned(), s->cards_clean(), diff --git a/src/hotspot/share/gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp b/src/hotspot/share/gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp index 57372e695c8..f8bad4bdcd7 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ // Used to track time from the end of concurrent start to the first mixed GC. // After calling the concurrent start/mixed gc notifications, the result can be -// obtained in last_marking_time() once, after which the tracking resets. +// obtained in get_and_reset_last_marking_time() once, after which the tracking resets. // Any pauses recorded by add_pause() will be subtracted from that results. 
class G1ConcurrentStartToMixedTimeTracker { private: @@ -60,7 +60,7 @@ public: } } - double last_marking_time() { + double get_and_reset_last_marking_time() { assert(has_result(), "Do not have all measurements yet."); double result = (_mixed_start_time - _concurrent_start_end_time) - _total_pause_time; reset(); @@ -80,6 +80,8 @@ public: } } + bool is_active() const { return _active; } + // Returns whether we have a result that can be retrieved. bool has_result() const { return _mixed_start_time > 0.0 && _concurrent_start_end_time > 0.0; } }; diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index e8498250f85..c835dd159a6 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -353,7 +353,13 @@ void G1FullCollector::phase1_mark_live_objects() { scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers()); } #if TASKQUEUE_STATS - marking_task_queues()->print_and_reset_taskqueue_stats("Marking Task Queue"); + marking_task_queues()->print_and_reset_taskqueue_stats("Full GC"); + + auto get_stats = [&](uint i) { + return marker(i)->partial_array_splitter().stats(); + }; + PartialArrayTaskStats::log_set(_num_workers, get_stats, + "Full GC Partial Array"); #endif } diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp index 5dbf70f36b3..93d8da0d842 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp @@ -22,7 +22,7 @@ * */ -#include "gc/g1/g1CollectedHeap.hpp" +#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" #include "gc/g1/g1FullCollector.inline.hpp" #include "gc/g1/g1FullGCCompactionPoint.hpp" diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp index 2b0b78ac1ce..3be4ab8d839 100644 --- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp +++ 
b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp @@ -39,7 +39,7 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector, _worker_id(worker_id), _bitmap(collector->mark_bitmap()), _task_queue(), - _partial_array_splitter(collector->partial_array_state_manager(), collector->workers(), ObjArrayMarkingStride), + _partial_array_splitter(collector->partial_array_state_manager(), collector->workers()), _mark_closure(worker_id, this, ClassLoaderData::_claim_stw_fullgc_mark, G1CollectedHeap::heap()->ref_processor_stw()), _stack_closure(this), _cld_closure(mark_closure(), ClassLoaderData::_claim_stw_fullgc_mark), @@ -60,14 +60,26 @@ void G1FullGCMarker::process_partial_array(PartialArrayState* state, bool stolen process_array_chunk(obj_array, claim._start, claim._end); } +static uintx calc_array_stride(uint array_len, uint num_threads) { + precond(num_threads > 0); + + const size_t stride = (array_len + num_threads - 1) / num_threads; + return clamp(stride, ArrayMarkingMinStride, ObjArrayMarkingStride); +} + void G1FullGCMarker::start_partial_array_processing(objArrayOop obj) { mark_closure()->do_klass(obj->klass()); // Don't push empty arrays to avoid unnecessary work. 
- size_t array_length = obj->length(); - if (array_length > 0) { - size_t initial_chunk_size = _partial_array_splitter.start(task_queue(), obj, nullptr, array_length); - process_array_chunk(obj, 0, initial_chunk_size); + const int array_length = obj->length(); + + if (array_length == 0) { + return; } + + const uintx stride = calc_array_stride(array_length, _collector->workers()); + const size_t initial_chunk_size = _partial_array_splitter.start(task_queue(), obj, nullptr, array_length, stride); + + process_array_chunk(obj, 0, initial_chunk_size); } void G1FullGCMarker::complete_marking(G1ScannerTasksQueueSet* task_queues, diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp index 5973cc841c5..82fe3655319 100644 --- a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp @@ -89,6 +89,7 @@ public: ~G1FullGCMarker(); G1MarkTasksQueue* task_queue() { return &_task_queue; } + PartialArraySplitter& partial_array_splitter() { return _partial_array_splitter; } // Marking entry points template inline void mark_and_push(T* p); diff --git a/src/hotspot/share/gc/g1/g1GCPauseType.hpp b/src/hotspot/share/gc/g1/g1GCPauseType.hpp deleted file mode 100644 index 254edb28fea..00000000000 --- a/src/hotspot/share/gc/g1/g1GCPauseType.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_GC_G1_G1GCPAUSETYPES_HPP -#define SHARE_GC_G1_G1GCPAUSETYPES_HPP - -#include "utilities/debug.hpp" -#include "utilities/enumIterator.hpp" - -enum class G1GCPauseType : uint { - YoungGC, - LastYoungGC, - ConcurrentStartMarkGC, - ConcurrentStartUndoGC, - Cleanup, - Remark, - MixedGC, - FullGC -}; - -ENUMERATOR_RANGE(G1GCPauseType, G1GCPauseType::YoungGC, G1GCPauseType::FullGC) - -class G1GCPauseTypeHelper { -public: - - static void assert_is_young_pause(G1GCPauseType type) { - assert(type != G1GCPauseType::FullGC, "must be"); - assert(type != G1GCPauseType::Remark, "must be"); - assert(type != G1GCPauseType::Cleanup, "must be"); - } - - static bool is_young_only_pause(G1GCPauseType type) { - assert_is_young_pause(type); - return type == G1GCPauseType::ConcurrentStartUndoGC || - type == G1GCPauseType::ConcurrentStartMarkGC || - type == G1GCPauseType::LastYoungGC || - type == G1GCPauseType::YoungGC; - } - - static bool is_mixed_pause(G1GCPauseType type) { - assert_is_young_pause(type); - return type == G1GCPauseType::MixedGC; - } - - static bool is_last_young_pause(G1GCPauseType type) { - assert_is_young_pause(type); - return type == G1GCPauseType::LastYoungGC; - } - - static bool is_concurrent_start_pause(G1GCPauseType type) { - assert_is_young_pause(type); - return type == G1GCPauseType::ConcurrentStartMarkGC || type == G1GCPauseType::ConcurrentStartUndoGC; - } - - static const char* to_string(G1GCPauseType type) { - 
static const char* pause_strings[] = { "Normal", - "Prepare Mixed", - "Concurrent Start", // Do not distinguish between the different - "Concurrent Start", // Concurrent Start pauses. - "Cleanup", - "Remark", - "Mixed", - "Full" }; - return pause_strings[static_cast(type)]; - } -}; - -#endif // SHARE_GC_G1_G1GCPAUSETYPES_HPP diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp index a5013ddbb40..023790a2422 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp @@ -180,7 +180,7 @@ void G1GCPhaseTimes::reset() { _cur_post_evacuate_cleanup_2_time_ms = 0.0; _cur_resize_heap_time_ms = 0.0; _cur_ref_proc_time_ms = 0.0; - _root_region_scan_wait_time_ms = 0.0; + _root_region_scan_time_ms = 0.0; _external_accounted_time_ms = 0.0; _recorded_prepare_heap_roots_time_ms = 0.0; _recorded_young_cset_choice_time_ms = 0.0; @@ -549,8 +549,8 @@ void G1GCPhaseTimes::print_other(double accounted_ms) const { // In addition, these are not included in G1GCPhaseTimes::_gc_pause_time_ms. // See G1YoungCollector::collect(). 
void G1GCPhaseTimes::print(bool evacuation_failed) { - if (_root_region_scan_wait_time_ms > 0.0) { - debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms); + if (_root_region_scan_time_ms > 0.0) { + debug_time("Root Region Scan", _root_region_scan_time_ms); } // Check if some time has been recorded for verification and only then print diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp index 8223148b791..b57bf0d617e 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp @@ -191,7 +191,7 @@ class G1GCPhaseTimes : public CHeapObj { double _cur_ref_proc_time_ms; // Not included in _gc_pause_time_ms - double _root_region_scan_wait_time_ms; + double _root_region_scan_time_ms; double _external_accounted_time_ms; @@ -325,8 +325,8 @@ class G1GCPhaseTimes : public CHeapObj { _cur_prepare_concurrent_task_time_ms = ms; } - void record_root_region_scan_wait_time(double time_ms) { - _root_region_scan_wait_time_ms = time_ms; + void record_root_region_scan_time(double time_ms) { + _root_region_scan_time_ms = time_ms; } void record_serial_free_cset_time_ms(double time_ms) { @@ -399,8 +399,8 @@ class G1GCPhaseTimes : public CHeapObj { return _cur_resize_heap_time_ms; } - double root_region_scan_wait_time_ms() { - return _root_region_scan_wait_time_ms; + double root_region_scan_time_ms() { + return _root_region_scan_time_ms; } double young_cset_choice_time_ms() { diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.hpp index 2b4b640d52b..ec9cab26049 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.hpp @@ -567,41 +567,15 @@ public: // G1HeapRegionClosure is used for iterating over regions. // Terminates the iteration when the "do_heap_region" method returns "true". 
class G1HeapRegionClosure : public StackObj { - friend class G1HeapRegionManager; - friend class G1CollectionSet; - friend class G1CollectionSetCandidates; - - bool _is_complete; - void set_incomplete() { _is_complete = false; } - public: - G1HeapRegionClosure(): _is_complete(true) {} - // Typically called on each region until it returns true. virtual bool do_heap_region(G1HeapRegion* r) = 0; - - // True after iteration if the closure was applied to all heap regions - // and returned "false" in all cases. - bool is_complete() { return _is_complete; } }; class G1HeapRegionIndexClosure : public StackObj { - friend class G1HeapRegionManager; - friend class G1CollectionSet; - friend class G1CollectionSetCandidates; - - bool _is_complete; - void set_incomplete() { _is_complete = false; } - public: - G1HeapRegionIndexClosure(): _is_complete(true) {} - // Typically called on each region until it returns true. virtual bool do_heap_region_index(uint region_index) = 0; - - // True after iteration if the closure was applied to all heap regions - // and returned "false" in all cases. - bool is_complete() { return _is_complete; } }; #endif // SHARE_GC_G1_G1HEAPREGION_HPP diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp index 4f242b7a537..f92e37fee3c 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp @@ -42,6 +42,11 @@ #include "utilities/globalDefinitions.hpp" inline HeapWord* G1HeapRegion::block_start(const void* addr) const { + if (is_young()) { + // We are here because of BlockLocationPrinter. + // Can be invoked in any context, so this region might not be parsable. 
+ return nullptr; + } return block_start(addr, parsable_bottom_acquire()); } @@ -64,6 +69,7 @@ inline HeapWord* G1HeapRegion::advance_to_block_containing_addr(const void* addr inline HeapWord* G1HeapRegion::block_start(const void* addr, HeapWord* const pb) const { assert(addr >= bottom() && addr < top(), "invalid address"); + assert(!is_young(), "Only non-young regions have BOT"); HeapWord* first_block = _bot->block_start_reaching_into_card(addr); return advance_to_block_containing_addr(addr, pb, first_block); } diff --git a/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp b/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp index fdd3b919590..3c0318827ef 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp @@ -511,7 +511,6 @@ void G1HeapRegionManager::iterate(G1HeapRegionClosure* blk) const { guarantee(at(i) != nullptr, "Tried to access region %u that has a null G1HeapRegion*", i); bool res = blk->do_heap_region(at(i)); if (res) { - blk->set_incomplete(); return; } } @@ -526,7 +525,6 @@ void G1HeapRegionManager::iterate(G1HeapRegionIndexClosure* blk) const { } bool res = blk->do_heap_region_index(i); if (res) { - blk->set_incomplete(); return; } } diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp index 4dd0a509bcd..1b9704e8ad3 100644 --- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp +++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -366,6 +366,12 @@ static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) { } size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand, size_t allocation_word_size) { + // User-requested Full GCs introduce GC load unrelated to heap size; reset CPU + // usage tracking so heap resizing heuristics are driven only by GC pressure. + if (GCCause::is_user_requested_gc(_g1h->gc_cause())) { + reset_cpu_usage_tracking_data(); + } + const size_t capacity_after_gc = _g1h->capacity(); // Capacity, free and used after the GC counted as full regions to // include the waste in the following calculations. diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp index a2a9bc8e857..714a2473a08 100644 --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "code/nmethod.hpp" #include "gc/g1/g1Allocator.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMarkThread.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegionRemSet.hpp" @@ -236,7 +237,7 @@ private: VerifyOption _vo; bool _failures; - bool is_in_full_gc() const { return G1CollectedHeap::heap()->collector_state()->in_full_gc(); } + bool is_in_full_gc() const { return G1CollectedHeap::heap()->collector_state()->is_in_full_gc(); } public: VerifyRegionClosure(VerifyOption vo) @@ -349,7 +350,7 @@ void G1HeapVerifier::verify(VerifyOption vo) { bool failures = rootsCl.failures() || codeRootsCl.failures(); - if (!_g1h->policy()->collector_state()->in_full_gc()) { + if (!_g1h->policy()->collector_state()->is_in_full_gc()) { // If we're verifying during a full GC then the region sets // will have been torn down at the start of the GC. Therefore // verifying the region sets will fail. So we only verify @@ -494,7 +495,7 @@ public: }; void G1HeapVerifier::verify_marking_state() { - assert(G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc(), "must be"); + assert(G1CollectedHeap::heap()->collector_state()->is_in_concurrent_start_gc(), "must be"); // Verify TAMSes, bitmaps and liveness statistics. // diff --git a/src/hotspot/share/gc/g1/g1IHOPControl.cpp b/src/hotspot/share/gc/g1/g1IHOPControl.cpp index 43698e9f12b..1e1c52477f9 100644 --- a/src/hotspot/share/gc/g1/g1IHOPControl.cpp +++ b/src/hotspot/share/gc/g1/g1IHOPControl.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,18 +38,18 @@ double G1IHOPControl::predict(const TruncatedSeq* seq) const { bool G1IHOPControl::have_enough_data_for_prediction() const { assert(_is_adaptive, "precondition"); - return ((size_t)_marking_times_s.num() >= G1AdaptiveIHOPNumInitialSamples) && - ((size_t)_allocation_rate_s.num() >= G1AdaptiveIHOPNumInitialSamples); + return ((size_t)_marking_start_to_mixed_time_s.num() >= G1AdaptiveIHOPNumInitialSamples) && + ((size_t)_old_gen_alloc_rate.num() >= G1AdaptiveIHOPNumInitialSamples); } -double G1IHOPControl::last_marking_length_s() const { - return _marking_times_s.last(); +double G1IHOPControl::last_marking_start_to_mixed_time_s() const { + return _marking_start_to_mixed_time_s.last(); } -size_t G1IHOPControl::actual_target_threshold() const { +size_t G1IHOPControl::effective_target_occupancy() const { assert(_is_adaptive, "precondition"); - // The actual target threshold takes the heap reserve and the expected waste in + // The effective target occupancy takes the heap reserve and the expected waste in // free space into account. // _heap_reserve is that part of the total heap capacity that is reserved for // eventual promotion failure. 
@@ -79,9 +79,9 @@ G1IHOPControl::G1IHOPControl(double ihop_percent, _last_allocation_time_s(0.0), _old_gen_alloc_tracker(old_gen_alloc_tracker), _predictor(predictor), - _marking_times_s(10, 0.05), - _allocation_rate_s(10, 0.05), - _last_unrestrained_young_size(0) { + _marking_start_to_mixed_time_s(10, 0.05), + _old_gen_alloc_rate(10, 0.05), + _expected_young_gen_at_first_mixed_gc(0) { assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "IHOP percent out of range: %.3f", ihop_percent); assert(!_is_adaptive || _predictor != nullptr, "precondition"); @@ -98,85 +98,104 @@ void G1IHOPControl::report_statistics(G1NewTracer* new_tracer, size_t non_young_ send_trace_event(new_tracer, non_young_occupancy); } -void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t additional_buffer_size) { +void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t expected_young_gen_size) { assert(allocation_time_s > 0, "Invalid allocation time: %.3f", allocation_time_s); _last_allocation_time_s = allocation_time_s; double alloc_rate = _old_gen_alloc_tracker->last_period_old_gen_growth() / allocation_time_s; - _allocation_rate_s.add(alloc_rate); - _last_unrestrained_young_size = additional_buffer_size; + _old_gen_alloc_rate.add(alloc_rate); + _expected_young_gen_at_first_mixed_gc = expected_young_gen_size; } -void G1IHOPControl::update_marking_length(double marking_length_s) { - assert(marking_length_s >= 0.0, "Invalid marking length: %.3f", marking_length_s); - _marking_times_s.add(marking_length_s); +void G1IHOPControl::add_marking_start_to_mixed_length(double length_s) { + assert(length_s >= 0.0, "Invalid marking length: %.3f", length_s); + _marking_start_to_mixed_time_s.add(length_s); } -size_t G1IHOPControl::get_conc_mark_start_threshold() { +// Determine the old generation occupancy threshold at which to start +// concurrent marking such that reclamation (first Mixed GC) begins +// before the heap reaches a critical occupancy 
level. +size_t G1IHOPControl::old_gen_threshold_for_conc_mark_start() { guarantee(_target_occupancy > 0, "Target occupancy must be initialized"); if (!_is_adaptive || !have_enough_data_for_prediction()) { return (size_t)(_initial_ihop_percent * _target_occupancy / 100.0); } - double pred_marking_time = predict(&_marking_times_s); - double pred_rate = predict(&_allocation_rate_s); - size_t pred_bytes = (size_t)(pred_marking_time * pred_rate); - size_t predicted_needed = pred_bytes + _last_unrestrained_young_size; - size_t internal_threshold = actual_target_threshold(); + // During the time between marking start and the first Mixed GC, + // additional memory will be consumed: + // - Old gen grows due to allocations: + // old_gen_alloc_bytes = old_gen_alloc_rate * marking_start_to_mixed_time + // - Young gen will occupy a certain size at the first Mixed GC: + // expected_young_gen_at_first_mixed_gc + double marking_start_to_mixed_time = predict(&_marking_start_to_mixed_time_s); + double old_gen_alloc_rate = predict(&_old_gen_alloc_rate); + size_t old_gen_alloc_bytes = (size_t)(marking_start_to_mixed_time * old_gen_alloc_rate); - return predicted_needed < internal_threshold - ? internal_threshold - predicted_needed + // Therefore, the total heap occupancy at the first Mixed GC is: + // current_old_gen + old_gen_growth + expected_young_gen_at_first_mixed_gc + // + // To ensure this does not exceed the target_heap_occupancy, we work + // backwards to compute the old gen occupancy at which marking must start: + // mark_start_threshold = target_heap_occupancy - + // (old_gen_growth + expected_young_gen_at_first_mixed_gc) + + size_t predicted_needed = old_gen_alloc_bytes + _expected_young_gen_at_first_mixed_gc; + size_t target_heap_occupancy = effective_target_occupancy(); + + return predicted_needed < target_heap_occupancy + ? 
target_heap_occupancy - predicted_needed : 0; } void G1IHOPControl::print_log(size_t non_young_occupancy) { assert(_target_occupancy > 0, "Target occupancy still not updated yet."); - size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold(); - log_debug(gc, ihop)("Basic information (value update), threshold: %zuB (%1.2f), target occupancy: %zuB, non-young occupancy: %zuB, " - "recent allocation size: %zuB, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms", - cur_conc_mark_start_threshold, - percent_of(cur_conc_mark_start_threshold, _target_occupancy), + size_t old_gen_mark_start_threshold = old_gen_threshold_for_conc_mark_start(); + log_debug(gc, ihop)("Basic information (value update), old-gen threshold: %zuB (%1.2f%%), target occupancy: %zuB, old-gen occupancy: %zuB (%1.2f%%), " + "recent old-gen allocation size: %zuB, recent allocation duration: %1.2fms, recent old-gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms", + old_gen_mark_start_threshold, + percent_of(old_gen_mark_start_threshold, _target_occupancy), _target_occupancy, non_young_occupancy, + percent_of(non_young_occupancy, _target_occupancy), _old_gen_alloc_tracker->last_period_old_gen_bytes(), _last_allocation_time_s * 1000.0, _last_allocation_time_s > 0.0 ? 
_old_gen_alloc_tracker->last_period_old_gen_bytes() / _last_allocation_time_s : 0.0, - last_marking_length_s() * 1000.0); + last_marking_start_to_mixed_time_s() * 1000.0); if (!_is_adaptive) { return; } - size_t actual_threshold = actual_target_threshold(); - log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target threshold: %zuB, " - "non-young occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, " - "predicted marking phase length: %1.2fms, prediction active: %s", - cur_conc_mark_start_threshold, - percent_of(cur_conc_mark_start_threshold, actual_threshold), - actual_threshold, + size_t effective_target = effective_target_occupancy(); + log_debug(gc, ihop)("Adaptive IHOP information (value update), prediction active: %s, old-gen threshold: %zuB (%1.2f%%), internal target occupancy: %zuB, " + "old-gen occupancy: %zuB, additional buffer size: %zuB, predicted old-gen allocation rate: %1.2fB/s, " + "predicted marking phase length: %1.2fms", + BOOL_TO_STR(have_enough_data_for_prediction()), + old_gen_mark_start_threshold, + percent_of(old_gen_mark_start_threshold, effective_target), + effective_target, non_young_occupancy, - _last_unrestrained_young_size, - predict(&_allocation_rate_s), - predict(&_marking_times_s) * 1000.0, - have_enough_data_for_prediction() ? 
"true" : "false"); + _expected_young_gen_at_first_mixed_gc, + predict(&_old_gen_alloc_rate), + predict(&_marking_start_to_mixed_time_s) * 1000.0); } void G1IHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) { assert(_target_occupancy > 0, "Target occupancy still not updated yet."); - tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(), + tracer->report_basic_ihop_statistics(old_gen_threshold_for_conc_mark_start(), _target_occupancy, non_young_occupancy, _old_gen_alloc_tracker->last_period_old_gen_bytes(), _last_allocation_time_s, - last_marking_length_s()); + last_marking_start_to_mixed_time_s()); if (_is_adaptive) { - tracer->report_adaptive_ihop_statistics(get_conc_mark_start_threshold(), - actual_target_threshold(), + tracer->report_adaptive_ihop_statistics(old_gen_threshold_for_conc_mark_start(), + effective_target_occupancy(), non_young_occupancy, - _last_unrestrained_young_size, - predict(&_allocation_rate_s), - predict(&_marking_times_s), + _expected_young_gen_at_first_mixed_gc, + predict(&_old_gen_alloc_rate), + predict(&_marking_start_to_mixed_time_s), have_enough_data_for_prediction()); } } diff --git a/src/hotspot/share/gc/g1/g1IHOPControl.hpp b/src/hotspot/share/gc/g1/g1IHOPControl.hpp index 24061c026d1..ff209012f02 100644 --- a/src/hotspot/share/gc/g1/g1IHOPControl.hpp +++ b/src/hotspot/share/gc/g1/g1IHOPControl.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,8 +58,11 @@ class G1IHOPControl : public CHeapObj { const G1OldGenAllocationTracker* _old_gen_alloc_tracker; const G1Predictions* _predictor; - TruncatedSeq _marking_times_s; - TruncatedSeq _allocation_rate_s; + // Wall-clock time in seconds from marking start to the first mixed GC, + // excluding GC Pause time. + TruncatedSeq _marking_start_to_mixed_time_s; + // Old generation allocation rate in bytes per second. + TruncatedSeq _old_gen_alloc_rate; // The most recent unrestrained size of the young gen. This is used as an additional // factor in the calculation of the threshold, as the threshold is based on @@ -68,18 +71,18 @@ class G1IHOPControl : public CHeapObj { // Since we cannot know what young gen sizes are used in the future, we will just // use the current one. We expect that this one will be one with a fairly large size, // as there is no marking or mixed gc that could impact its size too much. - size_t _last_unrestrained_young_size; + size_t _expected_young_gen_at_first_mixed_gc; // Get a new prediction bounded below by zero from the given sequence. double predict(const TruncatedSeq* seq) const; bool have_enough_data_for_prediction() const; - double last_marking_length_s() const; + double last_marking_start_to_mixed_time_s() const; - // The "actual" target threshold the algorithm wants to keep during and at the - // end of marking. This is typically lower than the requested threshold, as the + // The "effective" target occupancy the algorithm wants to keep until the start + // of Mixed GCs. This is typically lower than the target occupancy, as the // algorithm needs to consider restrictions by the environment. 
- size_t actual_target_threshold() const; + size_t effective_target_occupancy() const; void print_log(size_t non_young_occupancy); void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy); @@ -95,22 +98,24 @@ class G1IHOPControl : public CHeapObj { // Adjust target occupancy. void update_target_occupancy(size_t new_target_occupancy); - // Update information about time during which allocations in the Java heap occurred, - // how large these allocations were in bytes, and an additional buffer. - // The allocations should contain any amount of space made unusable for further - // allocation, e.g. any waste caused by TLAB allocation, space at the end of - // humongous objects that can not be used for allocation, etc. - // Together with the target occupancy, this additional buffer should contain the - // difference between old gen size and total heap size at the start of reclamation, - // and space required for that reclamation. - void update_allocation_info(double allocation_time_s, size_t additional_buffer_size); + void update_target_after_marking_phase(); + + // Update allocation rate information and current expected young gen size for the + // first mixed gc needed for the predictor. Allocation rate is given as the + // separately passed in allocation increment and the time passed (mutator time) + // for the latest allocation increment here. Allocation size is the memory needed + // during the mutator before and the first mixed gc pause itself. + // Contents include young gen at that point, and the memory required for evacuating + // the collection set in that first mixed gc (including waste caused by PLAB + // allocation etc.). + void update_allocation_info(double allocation_time_s, size_t expected_young_gen_size); // Update the time spent in the mutator beginning from the end of concurrent start to // the first mixed gc. 
- void update_marking_length(double marking_length_s); + void add_marking_start_to_mixed_length(double length_s); // Get the current non-young occupancy at which concurrent marking should start. - size_t get_conc_mark_start_threshold(); + size_t old_gen_threshold_for_conc_mark_start(); void report_statistics(G1NewTracer* tracer, size_t non_young_occupancy); }; diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp index 75a8ef1a336..52c8d4d4389 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp @@ -78,7 +78,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _surviving_young_words(nullptr), _surviving_words_length(collection_set->young_region_length() + 1), _old_gen_is_full(false), - _partial_array_splitter(g1h->partial_array_state_manager(), num_workers, ParGCArrayScanChunk), + _partial_array_splitter(g1h->partial_array_state_manager(), num_workers), _string_dedup_requests(), _max_num_optional_regions(collection_set->num_optional_regions()), _numa(g1h->numa()), @@ -253,7 +253,7 @@ void G1ParScanThreadState::start_partial_objarray(oop from_obj, size_t array_length = to_array->length(); size_t initial_chunk_size = // The source array is unused when processing states. - _partial_array_splitter.start(_task_queue, nullptr, to_array, array_length); + _partial_array_splitter.start(_task_queue, nullptr, to_array, array_length, ParGCArrayScanChunk); assert(_scanner.skip_card_mark_set(), "must be"); // Process the initial chunk. No need to process the type in the @@ -650,7 +650,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, Kla // Mark the failing object in the marking bitmap and later use the bitmap to handle // evacuation failure recovery. 
- _g1h->mark_evac_failure_object(_worker_id, old, word_sz); + _g1h->mark_evac_failure_object(old); _evacuation_failed_info.register_copy_failure(word_sz); @@ -741,7 +741,7 @@ void G1ParScanThreadStateSet::print_partial_array_task_stats() { return state_for_worker(i)->partial_array_task_stats(); }; PartialArrayTaskStats::log_set(_num_workers, get_stats, - "Partial Array Task Stats"); + "Young GC Partial Array"); } #endif // TASKQUEUE_STATS diff --git a/src/hotspot/share/gc/g1/g1PeriodicGCTask.cpp b/src/hotspot/share/gc/g1/g1PeriodicGCTask.cpp index f280d76f3c7..b5ff4272764 100644 --- a/src/hotspot/share/gc/g1/g1PeriodicGCTask.cpp +++ b/src/hotspot/share/gc/g1/g1PeriodicGCTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMark.inline.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1GCCounters.hpp" @@ -38,8 +39,8 @@ bool G1PeriodicGCTask::should_start_periodic_gc(G1CollectedHeap* g1h, // Ensure no GC safepoints while we're doing the checks, to avoid data races. SuspendibleThreadSetJoiner sts; - // If we are currently in a concurrent mark we are going to uncommit memory soon. - if (g1h->concurrent_mark()->in_progress()) { + // If we are currently in a concurrent cycle we are going to uncommit memory soon. + if (g1h->collector_state()->is_in_concurrent_cycle()) { log_debug(gc, periodic)("Concurrent cycle in progress. 
Skipping."); return false; } diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 98e6acc1d77..78a533d62c0 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -28,7 +28,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectionSetCandidates.inline.hpp" -#include "gc/g1/g1CollectionSetChooser.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1ConcurrentRefine.hpp" @@ -177,7 +177,7 @@ uint G1Policy::calculate_desired_eden_length_by_mmu() const { void G1Policy::update_young_length_bounds() { assert(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(), "must be"); - bool for_young_only_phase = collector_state()->in_young_only_phase(); + bool for_young_only_phase = collector_state()->is_in_young_only_phase(); update_young_length_bounds(_analytics->predict_pending_cards(for_young_only_phase), _analytics->predict_card_rs_length(for_young_only_phase), _analytics->predict_code_root_rs_length(for_young_only_phase)); @@ -505,7 +505,7 @@ uint G1Policy::calculate_desired_eden_length_before_mixed(double base_time_ms, double G1Policy::predict_survivor_regions_evac_time() const { double survivor_regions_evac_time = predict_young_region_other_time_ms(_g1h->survivor()->length()); for (G1HeapRegion* r : _g1h->survivor()->regions()) { - survivor_regions_evac_time += predict_region_copy_time_ms(r, _g1h->collector_state()->in_young_only_phase()); + survivor_regions_evac_time += predict_region_copy_time_ms(r, _g1h->collector_state()->is_in_young_only_phase()); } return survivor_regions_evac_time; @@ -561,8 +561,7 @@ void G1Policy::revise_young_list_target_length(size_t pending_cards, size_t card void G1Policy::record_full_collection_start() { record_pause_start_time(); // Release the future to-space so that it is 
available for compaction into. - collector_state()->set_in_young_only_phase(false); - collector_state()->set_in_full_gc(true); + collector_state()->set_in_full_gc(); _collection_set->abandon_all_candidates(); } @@ -571,17 +570,10 @@ void G1Policy::record_full_collection_end(size_t allocation_word_size) { // since last pause. double end_sec = os::elapsedTime(); - collector_state()->set_in_full_gc(false); - // "Nuke" the heuristics that control the young/mixed GC // transitions and make sure we start with young GCs after the Full GC. - collector_state()->set_in_young_only_phase(true); - collector_state()->set_in_young_gc_before_mixed(false); + collector_state()->set_in_normal_young_gc(); collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", allocation_word_size)); - collector_state()->set_in_concurrent_start_gc(false); - collector_state()->set_mark_in_progress(false); - collector_state()->set_mark_or_rebuild_in_progress(false); - collector_state()->set_clear_bitmap_in_progress(false); _eden_surv_rate_group->start_adding_regions(); // also call this on any additional surv rate groups @@ -593,7 +585,7 @@ void G1Policy::record_full_collection_end(size_t allocation_word_size) { _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes); double start_time_sec = cur_pause_start_sec(); - record_pause(G1GCPauseType::FullGC, start_time_sec, end_sec); + record_pause(Pause::Full, start_time_sec, end_sec); } static void log_refinement_stats(const G1ConcurrentRefineStats& stats) { @@ -698,18 +690,12 @@ void G1Policy::record_young_collection_start() { assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed"); } -void G1Policy::record_concurrent_mark_init_end() { - assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); - collector_state()->set_in_concurrent_start_gc(false); -} - void G1Policy::record_concurrent_mark_remark_end() { 
double end_time_sec = os::elapsedTime(); double start_time_sec = cur_pause_start_sec(); double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0; _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms); - record_pause(G1GCPauseType::Remark, start_time_sec, end_time_sec); - collector_state()->set_mark_in_progress(false); + record_pause(Pause::Remark, start_time_sec, end_time_sec); } G1CollectionSetCandidates* G1Policy::candidates() const { @@ -739,7 +725,7 @@ double G1Policy::constant_other_time_ms(double pause_time_ms) const { } bool G1Policy::about_to_start_mixed_phase() const { - return _g1h->concurrent_mark()->in_progress() || collector_state()->in_young_gc_before_mixed(); + return collector_state()->is_in_concurrent_cycle() || collector_state()->is_in_prepare_mixed_gc(); } bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_word_size) { @@ -747,21 +733,21 @@ bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_wor return false; } - size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); + size_t marking_initiating_old_gen_threshold = _ihop_control->old_gen_threshold_for_conc_mark_start(); size_t non_young_occupancy = _g1h->non_young_occupancy_after_allocation(allocation_word_size); bool result = false; - if (non_young_occupancy > marking_initiating_used_threshold) { - result = collector_state()->in_young_only_phase(); + if (non_young_occupancy > marking_initiating_old_gen_threshold) { + result = collector_state()->is_in_young_only_phase(); log_debug(gc, ergo, ihop)("%s non-young occupancy: %zuB allocation request: %zuB threshold: %zuB (%1.2f) source: %s", result ? 
"Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", - non_young_occupancy, allocation_word_size * HeapWordSize, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source); + non_young_occupancy, allocation_word_size * HeapWordSize, marking_initiating_old_gen_threshold, (double) marking_initiating_old_gen_threshold / _g1h->capacity() * 100, source); } return result; } bool G1Policy::concurrent_operation_is_full_mark(const char* msg, size_t allocation_word_size) { - return collector_state()->in_concurrent_start_gc() && + return collector_state()->is_in_concurrent_start_gc() && ((_g1h->gc_cause() != GCCause::_g1_humongous_allocation) || need_to_start_conc_mark(msg, allocation_word_size)); } @@ -800,11 +786,12 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar double end_time_sec = Ticks::now().seconds(); double pause_time_ms = (end_time_sec - start_time_sec) * 1000.0; - G1GCPauseType this_pause = collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark); - bool is_young_only_pause = G1GCPauseTypeHelper::is_young_only_pause(this_pause); + Pause this_pause = collector_state()->gc_pause_type(concurrent_operation_is_full_mark); + bool is_young_only_pause = G1CollectorState::is_young_only_pause(this_pause); - if (G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause)) { - record_concurrent_mark_init_end(); + if (G1CollectorState::is_concurrent_start_pause(this_pause)) { + assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); + collector_state()->set_in_normal_young_gc(); } else { maybe_start_marking(allocation_word_size); } @@ -945,20 +932,19 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar record_pause(this_pause, start_time_sec, end_time_sec); - if 
(G1GCPauseTypeHelper::is_last_young_pause(this_pause)) { - assert(!G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause), + if (G1CollectorState::is_prepare_mixed_pause(this_pause)) { + assert(!G1CollectorState::is_concurrent_start_pause(this_pause), "The young GC before mixed is not allowed to be concurrent start GC"); // This has been the young GC before we start doing mixed GCs. We already // decided to start mixed GCs much earlier, so there is nothing to do except // advancing the state. - collector_state()->set_in_young_only_phase(false); - collector_state()->set_in_young_gc_before_mixed(false); - } else if (G1GCPauseTypeHelper::is_mixed_pause(this_pause)) { + collector_state()->set_in_space_reclamation_phase(); + } else if (G1CollectorState::is_mixed_pause(this_pause)) { // This is a mixed GC. Here we decide whether to continue doing more // mixed GCs or not. if (!next_gc_should_be_mixed()) { log_debug(gc, ergo)("do not continue mixed GCs (candidate old regions not available)"); - collector_state()->set_in_young_only_phase(true); + collector_state()->set_in_normal_young_gc(); assert(!candidates()->has_more_marking_candidates(), "only end mixed if all candidates from marking were processed"); @@ -971,23 +957,18 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar _eden_surv_rate_group->start_adding_regions(); - assert(!(G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()), - "If the last pause has been concurrent start, we should not have been in the marking window"); - if (G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause)) { - collector_state()->set_mark_in_progress(concurrent_operation_is_full_mark); - collector_state()->set_mark_or_rebuild_in_progress(concurrent_operation_is_full_mark); - } + assert(!(G1CollectorState::is_concurrent_start_pause(this_pause) && collector_state()->is_in_concurrent_cycle()), + "If the last pause has been concurrent start, we 
should not have been in the marking cycle"); _free_regions_at_end_of_collection = _g1h->num_free_regions(); + _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes); // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely // that in this case we are not running in a "normal" operating mode. if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { update_young_length_bounds(); - _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes); - if (update_ihop_prediction(app_time_ms / 1000.0, - G1GCPauseTypeHelper::is_young_only_pause(this_pause))) { + if (update_ihop_prediction(app_time_ms / 1000.0, is_young_only_pause)) { _ihop_control->report_statistics(_g1h->gc_tracer_stw(), _g1h->non_young_occupancy_after_allocation(allocation_word_size)); } } else { @@ -1044,14 +1025,13 @@ bool G1Policy::update_ihop_prediction(double mutator_time_s, bool report = false; - double marking_to_mixed_time = -1.0; if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) { - marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time(); + double marking_to_mixed_time = _concurrent_start_to_mixed.get_and_reset_last_marking_time(); assert(marking_to_mixed_time > 0.0, "Concurrent start to mixed time must be larger than zero but is %.3f", marking_to_mixed_time); if (marking_to_mixed_time > min_valid_time) { - _ihop_control->update_marking_length(marking_to_mixed_time); + _ihop_control->add_marking_start_to_mixed_length(marking_to_mixed_time); report = true; } } @@ -1081,7 +1061,7 @@ void G1Policy::record_young_gc_pause_end(bool evacuation_failed) { double G1Policy::predict_base_time_ms(size_t pending_cards, size_t card_rs_length, size_t code_root_rs_length) const { - bool in_young_only_phase = collector_state()->in_young_only_phase(); + bool in_young_only_phase = collector_state()->is_in_young_only_phase(); // Cards from the refinement table and the cards 
from the young gen remset are // unique to each other as they are located on the card table. @@ -1105,7 +1085,7 @@ double G1Policy::predict_base_time_ms(size_t pending_cards, } double G1Policy::predict_base_time_ms(size_t pending_cards, size_t card_rs_length) const { - bool for_young_only_phase = collector_state()->in_young_only_phase(); + bool for_young_only_phase = collector_state()->is_in_young_only_phase(); size_t code_root_rs_length = _analytics->predict_code_root_rs_length(for_young_only_phase); return predict_base_time_ms(pending_cards, card_rs_length, code_root_rs_length); } @@ -1145,7 +1125,18 @@ double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) co if (bytes_to_copy != nullptr) { *bytes_to_copy = expected_bytes; } - return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->in_young_only_phase()); + return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->is_in_young_only_phase()); +} + +bool G1Policy::should_update_surv_rate_group_predictors() { + return collector_state()->is_in_young_only_phase() && !collector_state()->is_in_mark_or_rebuild(); +} + +void G1Policy::cset_regions_freed() { + bool update = should_update_surv_rate_group_predictors(); + + _eden_surv_rate_group->all_surviving_words_recorded(predictor(), update); + _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update); } double G1Policy::predict_region_copy_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const { @@ -1235,7 +1226,7 @@ bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) // We actually check whether we are marking here and not if we are in a // reclamation phase. This means that we will schedule a concurrent mark // even while we are still in the process of reclaiming memory. 
- bool during_cycle = _g1h->concurrent_mark()->in_progress(); + bool during_cycle = collector_state()->is_in_concurrent_cycle(); if (!during_cycle) { log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). " "GC cause: %s", @@ -1250,11 +1241,6 @@ bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) } } -void G1Policy::initiate_conc_mark() { - collector_state()->set_in_concurrent_start_gc(true); - collector_state()->set_initiate_conc_mark_if_possible(false); -} - static const char* requester_for_mixed_abort(GCCause::Cause cause) { if (cause == GCCause::_wb_breakpoint) { return "run_to breakpoint"; @@ -1270,11 +1256,11 @@ void G1Policy::decide_on_concurrent_start_pause() { // We are about to decide on whether this pause will be a // concurrent start pause. - // First, collector_state()->in_concurrent_start_gc() should not be already set. We + // First, collector_state()->is_in_concurrent_start_gc() should not already be set. We // will set it here if we have to. However, it should be cleared by // the end of the pause (it's only set for the duration of a // concurrent start pause). - assert(!collector_state()->in_concurrent_start_gc(), "pre-condition"); + assert(!collector_state()->is_in_concurrent_start_gc(), "pre-condition"); if (collector_state()->initiate_conc_mark_if_possible()) { // We had noticed on a previous pause that the heap occupancy has @@ -1287,25 +1273,21 @@ void G1Policy::decide_on_concurrent_start_pause() { if ((cause != GCCause::_wb_breakpoint) && ConcurrentGCBreakpoints::is_controlled()) { log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)"); - } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) { + } else if (!about_to_start_mixed_phase() && collector_state()->is_in_young_only_phase()) { // Initiate a new concurrent start if there is no marking or reclamation going on. 
- initiate_conc_mark(); + collector_state()->set_in_concurrent_start_gc(); log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); } else if (_g1h->is_user_requested_concurrent_full_gc(cause) || GCCause::is_codecache_requested_gc(cause) || (cause == GCCause::_wb_breakpoint)) { - // Initiate a concurrent start. A concurrent start must be a young only - // GC, so the collector state must be updated to reflect this. - collector_state()->set_in_young_only_phase(true); - collector_state()->set_in_young_gc_before_mixed(false); - + // Force concurrent start. + collector_state()->set_in_concurrent_start_gc(); // We might have ended up coming here about to start a mixed phase with a collection set // active. The following remark might change the change the "evacuation efficiency" of // the regions in this set, leading to failing asserts later. // Since the concurrent cycle will recreate the collection set anyway, simply drop it here. abandon_collection_set_candidates(); abort_time_to_mixed_tracking(); - initiate_conc_mark(); log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)", requester_for_mixed_abort(cause)); } else { @@ -1326,10 +1308,10 @@ void G1Policy::decide_on_concurrent_start_pause() { } // Result consistency checks. // We do not allow concurrent start to be piggy-backed on a mixed GC. - assert(!collector_state()->in_concurrent_start_gc() || - collector_state()->in_young_only_phase(), "sanity"); - // We also do not allow mixed GCs during marking. - assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity"); + assert(!collector_state()->is_in_concurrent_start_gc() || + collector_state()->is_in_young_only_phase(), "sanity"); + // We also do not allow mixed GCs during marking/rebuilding. 
+ assert(!collector_state()->is_in_mark_or_rebuild() || collector_state()->is_in_young_only_phase(), "sanity %d %d", collector_state()->is_in_concurrent_cycle(), collector_state()->is_in_young_only_phase()); } void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_sets) { @@ -1348,16 +1330,16 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se abort_time_to_mixed_tracking(); log_debug(gc, ergo)("request young-only gcs (candidate old regions not available)"); } - collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending); - collector_state()->set_mark_or_rebuild_in_progress(false); - collector_state()->set_clear_bitmap_in_progress(true); + if (mixed_gc_pending) { + collector_state()->set_in_prepare_mixed_gc(); + } double end_sec = os::elapsedTime(); double start_sec = cur_pause_start_sec(); double elapsed_time_ms = (end_sec - start_sec) * 1000.0; _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms); - record_pause(G1GCPauseType::Cleanup, start_sec, end_sec); + record_pause(Pause::Cleanup, start_sec, end_sec); } void G1Policy::abandon_collection_set_candidates() { @@ -1373,25 +1355,25 @@ void G1Policy::maybe_start_marking(size_t allocation_word_size) { } } -void G1Policy::update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_time_sec, double end_time_sec) { +void G1Policy::update_gc_pause_time_ratios(Pause gc_type, double start_time_sec, double end_time_sec) { double pause_time_sec = end_time_sec - start_time_sec; double pause_time_ms = pause_time_sec * 1000.0; _analytics->update_gc_time_ratios(end_time_sec, pause_time_ms); - if (gc_type == G1GCPauseType::Cleanup || gc_type == G1GCPauseType::Remark) { + if (G1CollectorState::is_concurrent_cycle_pause(gc_type)) { _analytics->append_prev_collection_pause_end_ms(pause_time_ms); } else { _analytics->set_prev_collection_pause_end_ms(end_time_sec * 1000.0); } } -void G1Policy::record_pause(G1GCPauseType gc_type, +void 
G1Policy::record_pause(Pause gc_type, double start, double end) { // Manage the MMU tracker. For some reason it ignores Full GCs. - if (gc_type != G1GCPauseType::FullGC) { + if (gc_type != Pause::Full) { _mmu_tracker->add_pause(start, end); } @@ -1403,21 +1385,21 @@ void G1Policy::record_pause(G1GCPauseType gc_type, _analytics->set_gc_cpu_time_at_pause_end_ms(elapsed_gc_cpu_time); } -void G1Policy::update_time_to_mixed_tracking(G1GCPauseType gc_type, +void G1Policy::update_time_to_mixed_tracking(Pause gc_type, double start, double end) { // Manage the mutator time tracking from concurrent start to first mixed gc. switch (gc_type) { - case G1GCPauseType::FullGC: + case Pause::Full: abort_time_to_mixed_tracking(); break; - case G1GCPauseType::Cleanup: - case G1GCPauseType::Remark: - case G1GCPauseType::YoungGC: - case G1GCPauseType::LastYoungGC: + case Pause::Cleanup: + case Pause::Remark: + case Pause::Normal: + case Pause::PrepareMixed: _concurrent_start_to_mixed.add_pause(end - start); break; - case G1GCPauseType::ConcurrentStartMarkGC: + case Pause::ConcurrentStartFull: // Do not track time-to-mixed time for periodic collections as they are likely // to be not representative to regular operation as the mutators are idle at // that time. Also only track full concurrent mark cycles. 
@@ -1425,12 +1407,12 @@ void G1Policy::update_time_to_mixed_tracking(G1GCPauseType gc_type, _concurrent_start_to_mixed.record_concurrent_start_end(end); } break; - case G1GCPauseType::ConcurrentStartUndoGC: + case Pause::ConcurrentStartUndo: assert(_g1h->gc_cause() == GCCause::_g1_humongous_allocation, "GC cause must be humongous allocation but is %d", _g1h->gc_cause()); break; - case G1GCPauseType::MixedGC: + case Pause::Mixed: _concurrent_start_to_mixed.record_mixed_gc_start(start); break; default: diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp index 9513c79869e..5c5c2bc3572 100644 --- a/src/hotspot/share/gc/g1/g1Policy.hpp +++ b/src/hotspot/share/gc/g1/g1Policy.hpp @@ -56,7 +56,7 @@ class GCPolicyCounters; class STWGCTimer; class G1Policy: public CHeapObj { - private: + using Pause = G1CollectorState::Pause; static G1IHOPControl* create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker, const G1Predictions* predictor); @@ -114,9 +114,7 @@ class G1Policy: public CHeapObj { G1ConcurrentStartToMixedTimeTracker _concurrent_start_to_mixed; - bool should_update_surv_rate_group_predictors() { - return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress(); - } + bool should_update_surv_rate_group_predictors(); double pending_cards_processing_time() const; public: @@ -160,12 +158,7 @@ public: // bytes_to_copy is non-null. double predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy = nullptr) const; - void cset_regions_freed() { - bool update = should_update_surv_rate_group_predictors(); - - _eden_surv_rate_group->all_surviving_words_recorded(predictor(), update); - _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update); - } + void cset_regions_freed(); G1MMUTracker* mmu_tracker() { return _mmu_tracker; @@ -268,13 +261,13 @@ private: // Sets up marking if proper conditions are met. 
void maybe_start_marking(size_t allocation_word_size); // Manage time-to-mixed tracking. - void update_time_to_mixed_tracking(G1GCPauseType gc_type, double start, double end); + void update_time_to_mixed_tracking(Pause gc_type, double start, double end); // Record the given STW pause with the given start and end times (in s). - void record_pause(G1GCPauseType gc_type, + void record_pause(Pause gc_type, double start, double end); - void update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_sec, double end_sec); + void update_gc_pause_time_ratios(Pause gc_type, double start_sec, double end_sec); // Indicate that we aborted marking before doing any mixed GCs. void abort_time_to_mixed_tracking(); @@ -320,9 +313,6 @@ public: void record_full_collection_start(); void record_full_collection_end(size_t allocation_word_size); - // Must currently be called while the world is stopped. - void record_concurrent_mark_init_end(); - void record_concurrent_mark_remark_end(); // Record start, end, and completion of cleanup. @@ -339,11 +329,6 @@ private: // regions and update the associated members. void update_survival_estimates_for_next_collection(); - // Set the state to start a concurrent marking cycle and clear - // _initiate_conc_mark_if_possible because it has now been - // acted on. - void initiate_conc_mark(); - public: // This sets the initiate_conc_mark_if_possible() flag to start a // new cycle, as long as we are not already in one. It's best if it diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp index 4dcdd33846e..df76147f4b1 100644 --- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp +++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp @@ -95,7 +95,7 @@ private: // Evict a given element of the statistics cache. 
void evict(uint idx); - size_t _num_cache_entries_mask; + const uint _num_cache_entries_mask; uint hash(uint idx) { return idx & _num_cache_entries_mask; diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 0c9a0fad8f2..9f9f0ecdf3a 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -31,6 +31,7 @@ #include "gc/g1/g1CardTableEntryClosure.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.inline.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefineSweepTask.hpp" #include "gc/g1/g1FromCardCache.hpp" @@ -1026,7 +1027,7 @@ class G1MergeHeapRootsTask : public WorkerTask { // the pause occurs during the Concurrent Cleanup for Next Mark phase. // Only at that point the region's bitmap may contain marks while being in the collection // set at the same time. - return _g1h->collector_state()->clear_bitmap_in_progress() && + return _g1h->collector_state()->is_in_reset_for_next_cycle() && hr->is_old(); } diff --git a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp index 0c9973c520d..94f5466b8e0 100644 --- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp +++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,12 +23,16 @@ */ #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1CollectionSetChooser.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegionRemSet.inline.hpp" #include "gc/g1/g1RemSetTrackingPolicy.hpp" #include "runtime/safepoint.hpp" +static bool region_occupancy_low_enough_for_evac(size_t live_bytes) { + size_t mixed_gc_live_threshold_bytes = G1HeapRegion::GrainBytes * (size_t)G1MixedGCLiveThresholdPercent / 100; + return live_bytes < mixed_gc_live_threshold_bytes; +} + void G1RemSetTrackingPolicy::update_at_allocate(G1HeapRegion* r) { assert(r->is_young() || r->is_humongous() || r->is_old(), "Region %u with unexpected heap region type %s", r->hrm_index(), r->get_type_str()); @@ -75,7 +79,8 @@ bool G1RemSetTrackingPolicy::update_old_before_rebuild(G1HeapRegion* r) { bool selected_for_rebuild = false; - if (G1CollectionSetChooser::region_occupancy_low_enough_for_evac(r->live_bytes()) && + if (region_occupancy_low_enough_for_evac(r->live_bytes()) && + !G1CollectedHeap::heap()->is_old_gc_alloc_region(r) && !r->rem_set()->is_tracked()) { r->rem_set()->set_state_updating(); selected_for_rebuild = true; diff --git a/src/hotspot/share/gc/g1/g1RootClosures.cpp b/src/hotspot/share/gc/g1/g1RootClosures.cpp index f03681487cb..16c47cddea1 100644 --- a/src/hotspot/share/gc/g1/g1RootClosures.cpp +++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,7 @@ * */ +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1OopClosures.inline.hpp" #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1SharedClosures.hpp" @@ -72,7 +73,7 @@ G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1Colle G1ParScanThreadState* pss, bool process_only_dirty_klasses) { G1EvacuationRootClosures* res = nullptr; - if (g1h->collector_state()->in_concurrent_start_gc()) { + if (g1h->collector_state()->is_in_concurrent_start_gc()) { if (ClassUnloadingWithConcurrentMark) { res = new G1ConcurrentStartMarkClosures(g1h, pss); } else { diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp index dac237cb277..a534eefb428 100644 --- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp +++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,6 @@ #include "code/codeCache.hpp" #include "gc/g1/g1BarrierSet.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1GCParPhaseTimesTracker.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" diff --git a/src/hotspot/share/gc/g1/g1Trace.cpp b/src/hotspot/share/gc/g1/g1Trace.cpp index ed6a91f41ed..d6eadda5d50 100644 --- a/src/hotspot/share/gc/g1/g1Trace.cpp +++ b/src/hotspot/share/gc/g1/g1Trace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1EvacInfo.hpp" -#include "gc/g1/g1GCPauseType.hpp" #include "gc/g1/g1HeapRegionTraceType.hpp" #include "gc/g1/g1Trace.hpp" #include "gc/shared/gcHeapSummary.hpp" @@ -48,12 +48,12 @@ public: class G1YCTypeConstant : public JfrSerializer { public: void serialize(JfrCheckpointWriter& writer) { - constexpr EnumRange types{}; + constexpr EnumRange types{}; static const u4 nof_entries = static_cast(types.size()); writer.write_count(nof_entries); for (auto index : types) { writer.write_key(static_cast(index)); - writer.write(G1GCPauseTypeHelper::to_string(index)); + writer.write(G1CollectorState::to_string(index)); } } }; @@ -72,8 +72,8 @@ void G1NewTracer::initialize() { JFR_ONLY(register_jfr_type_constants();) } -void G1NewTracer::report_young_gc_pause(G1GCPauseType pause) { - G1GCPauseTypeHelper::assert_is_young_pause(pause); +void G1NewTracer::report_young_gc_pause(G1CollectorState::Pause pause) { + G1CollectorState::assert_is_young_pause(pause); _pause = pause; } @@ -128,7 +128,7 @@ void G1NewTracer::report_adaptive_ihop_statistics(size_t threshold, void G1NewTracer::send_g1_young_gc_event() { // Check that the pause type has been updated to something valid for this event. - G1GCPauseTypeHelper::assert_is_young_pause(_pause); + G1CollectorState::assert_is_young_pause(_pause); EventG1GarbageCollection e(UNTIMED); if (e.should_commit()) { diff --git a/src/hotspot/share/gc/g1/g1Trace.hpp b/src/hotspot/share/gc/g1/g1Trace.hpp index a2e11ed4496..bfcc275d2ca 100644 --- a/src/hotspot/share/gc/g1/g1Trace.hpp +++ b/src/hotspot/share/gc/g1/g1Trace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_GC_G1_G1TRACE_HPP #define SHARE_GC_G1_G1TRACE_HPP -#include "gc/g1/g1GCPauseType.hpp" +#include "gc/g1/g1CollectorState.hpp" #include "gc/shared/gcTrace.hpp" class G1EvacInfo; @@ -33,17 +33,17 @@ class G1HeapSummary; class G1EvacSummary; class G1NewTracer : public YoungGCTracer, public CHeapObj { - G1GCPauseType _pause; + G1CollectorState::Pause _pause; public: G1NewTracer() : YoungGCTracer(G1New), - _pause(G1GCPauseType::FullGC) // Initialize to something invalid. For this event, which + _pause(G1CollectorState::Pause::Full) // Initialize to something invalid. For this event, which // is about young collections, FullGC is not a valid value. { } void initialize(); - void report_young_gc_pause(G1GCPauseType pause); + void report_young_gc_pause(G1CollectorState::Pause pause); void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions); void report_evacuation_info(G1EvacInfo* info); void report_evacuation_failed(EvacuationFailedInfo& ef_info); diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp index 56ab3a4b0fe..891432e20a7 100644 --- a/src/hotspot/share/gc/g1/g1VMOperations.cpp +++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1Policy.hpp" #include "gc/g1/g1Trace.hpp" @@ -84,8 +85,9 @@ void VM_G1TryInitiateConcMark::doit() { GCCauseSetter x(g1h, _gc_cause); - _mark_in_progress = g1h->collector_state()->mark_in_progress(); - _cycle_already_in_progress = g1h->concurrent_mark()->in_progress(); + G1CollectorState* state = g1h->collector_state(); + _mark_in_progress = state->is_in_marking(); + _cycle_already_in_progress = state->is_in_concurrent_cycle(); if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) { // Failure to force the next GC pause to be a concurrent start indicates @@ -150,8 +152,9 @@ bool VM_G1PauseConcurrent::doit_prologue() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); if (g1h->is_shutting_down()) { Heap_lock->unlock(); - // JVM shutdown has started. This ensures that any further operations will be properly aborted - // and will not interfere with the shutdown process. + // JVM shutdown has started. Abort concurrent marking to ensure that any further + // concurrent VM operations will not try to start and interfere with the shutdown + // process. 
g1h->concurrent_mark()->abort_marking_threads(); return false; } @@ -166,11 +169,11 @@ void VM_G1PauseConcurrent::doit_epilogue() { } void VM_G1PauseRemark::work() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - g1h->concurrent_mark()->remark(); + G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark(); + cm->remark(); } void VM_G1PauseCleanup::work() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - g1h->concurrent_mark()->cleanup(); + G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark(); + cm->cleanup(); } diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index a9db9a7c269..9c12127c864 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -31,7 +31,7 @@ #include "gc/g1/g1CardSetMemory.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSetCandidates.inline.hpp" -#include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1EvacFailureRegions.inline.hpp" #include "gc/g1/g1EvacInfo.hpp" @@ -70,7 +70,7 @@ class G1YoungGCTraceTime { G1YoungCollector* _collector; - G1GCPauseType _pause_type; + G1CollectorState::Pause _pause_type; GCCause::Cause _pause_cause; static const uint MaxYoungGCNameLength = 128; @@ -93,7 +93,7 @@ class G1YoungGCTraceTime { os::snprintf_checked(_young_gc_name_data, MaxYoungGCNameLength, "Pause Young (%s) (%s)%s", - G1GCPauseTypeHelper::to_string(_pause_type), + G1CollectorState::to_string(_pause_type), GCCause::to_string(_pause_cause), evacuation_failed_string); return _young_gc_name_data; @@ -105,7 +105,7 @@ public: // Take snapshot of current pause type at start as it may be modified during gc. // The strings for all Concurrent Start pauses are the same, so the parameter // does not matter here. 
- _pause_type(_collector->collector_state()->young_gc_pause_type(false /* concurrent_operation_is_full_mark */)), + _pause_type(_collector->collector_state()->gc_pause_type(false /* concurrent_operation_is_full_mark */)), _pause_cause(cause), // Fake a "no cause" and manually add the correct string in update_young_gc_name() // to make the string look more natural. @@ -142,7 +142,7 @@ public: G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) : G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { } - void report_pause_type(G1GCPauseType type) { + void report_pause_type(G1CollectorState::Pause type) { tracer()->report_young_gc_pause(type); } @@ -160,9 +160,9 @@ class G1YoungGCVerifierMark : public StackObj { static G1HeapVerifier::G1VerifyType young_collection_verify_type() { G1CollectorState* state = G1CollectedHeap::heap()->collector_state(); - if (state->in_concurrent_start_gc()) { + if (state->is_in_concurrent_start_gc()) { return G1HeapVerifier::G1VerifyConcurrentStart; - } else if (state->in_young_only_phase()) { + } else if (state->is_in_young_only_phase()) { return G1HeapVerifier::G1VerifyYoungNormal; } else { return G1HeapVerifier::G1VerifyMixed; @@ -244,19 +244,13 @@ G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injecto return _g1h->allocation_failure_injector(); } - -void G1YoungCollector::wait_for_root_region_scanning() { +void G1YoungCollector::complete_root_region_scan() { Ticks start = Ticks::now(); - // We have to wait until the CM threads finish scanning the - // root regions as it's the only way to ensure that all the - // objects on them have been correctly scanned before we start - // moving them during the GC. 
- bool waited = concurrent_mark()->wait_until_root_region_scan_finished(); - Tickspan wait_time; - if (waited) { - wait_time = (Ticks::now() - start); + // We have to complete root region scan as it's the only way to ensure that all the + // objects on them have been correctly scanned before we start moving them during the GC. + if (concurrent_mark()->complete_root_regions_scan_in_safepoint()) { + phase_times()->record_root_region_scan_time((Ticks::now() - start).seconds() * MILLIUNITS); } - phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS); } class G1PrintCollectionSetClosure : public G1HeapRegionClosure { @@ -391,7 +385,7 @@ class G1PrepareEvacuationTask : public WorkerTask { if (!obj->is_typeArray()) { // All regions that were allocated before marking have a TAMS != bottom. bool allocated_before_mark_start = region->bottom() != _g1h->concurrent_mark()->top_at_mark_start(region); - bool mark_in_progress = _g1h->collector_state()->mark_in_progress(); + bool mark_in_progress = _g1h->collector_state()->is_in_marking(); if (allocated_before_mark_start && mark_in_progress) { return false; @@ -530,7 +524,7 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) // Needs log buffers flushed. 
calculate_collection_set(evacuation_info, policy()->max_pause_time_ms()); - if (collector_state()->in_concurrent_start_gc()) { + if (collector_state()->is_in_concurrent_start_gc()) { Ticks start = Ticks::now(); concurrent_mark()->pre_concurrent_start(_gc_cause); phase_times()->record_prepare_concurrent_task_time_ms((Ticks::now() - start).seconds() * 1000.0); @@ -896,17 +890,10 @@ public: assert(obj != nullptr, "the caller should have filtered out null values"); const G1HeapRegionAttr region_attr =_g1h->region_attr(obj); - if (!region_attr.is_in_cset_or_humongous_candidate()) { - return; - } + assert(!region_attr.is_humongous_candidate(), "Humongous candidates should never be considered alive"); if (region_attr.is_in_cset()) { assert(obj->is_forwarded(), "invariant" ); *p = obj->forwardee(); - } else { - assert(!obj->is_forwarded(), "invariant" ); - assert(region_attr.is_humongous_candidate(), - "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type()); - _g1h->set_humongous_is_live(obj); } } }; @@ -932,7 +919,8 @@ public: template void do_oop_work(T* p) { oop obj = RawAccess<>::oop_load(p); - if (_g1h->is_in_cset_or_humongous_candidate(obj)) { + assert(!_g1h->region_attr(obj).is_humongous_candidate(), "Humongous candidates should never be considered alive"); + if (_g1h->is_in_cset(obj)) { // If the referent object has been forwarded (either copied // to a new location or to itself in the event of an // evacuation failure) then we need to update the reference @@ -1043,7 +1031,7 @@ void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thre } void G1YoungCollector::enqueue_candidates_as_root_regions() { - assert(collector_state()->in_concurrent_start_gc(), "must be"); + assert(collector_state()->is_in_concurrent_start_gc(), "must be"); G1CollectionSetCandidates* candidates = collection_set()->candidates(); candidates->iterate_regions([&] (G1HeapRegion* r) { @@ -1070,6 +1058,7 @@ void 
G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info, allocator()->release_gc_alloc_regions(evacuation_info); #if TASKQUEUE_STATS + _g1h->task_queues()->print_and_reset_taskqueue_stats("Young GC"); // Logging uses thread states, which are deleted by cleanup, so this must // be done before cleanup. per_thread_states->print_partial_array_task_stats(); @@ -1082,7 +1071,7 @@ void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info, // Regions in the collection set candidates are roots for the marking (they are // not marked through considering they are very likely to be reclaimed soon. // They need to be enqueued explicitly compared to survivor regions. - if (collector_state()->in_concurrent_start_gc()) { + if (collector_state()->is_in_concurrent_start_gc()) { enqueue_candidates_as_root_regions(); } @@ -1152,7 +1141,7 @@ void G1YoungCollector::collect() { // Wait for root region scan here to make sure that it is done before any // use of the STW workers to maximize cpu use (i.e. all cores are available // just to do that). - wait_for_root_region_scanning(); + complete_root_region_scan(); G1YoungGCVerifierMark vm(this); { @@ -1185,9 +1174,8 @@ void G1YoungCollector::collect() { // Need to report the collection pause now since record_collection_pause_end() // modifies it to the next state. 
- jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark)); + jtm.report_pause_type(collector_state()->gc_pause_type(_concurrent_operation_is_full_mark)); policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed(), _allocation_word_size); } - TASKQUEUE_STATS_ONLY(_g1h->task_queues()->print_and_reset_taskqueue_stats("Oop Queue");) } diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.hpp b/src/hotspot/share/gc/g1/g1YoungCollector.hpp index 76d443b1a9f..ab32ca770a4 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.hpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.hpp @@ -89,7 +89,7 @@ class G1YoungCollector { // returning the total time taken. Tickspan run_task_timed(WorkerTask* task); - void wait_for_root_region_scanning(); + void complete_root_region_scan(); void calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms); diff --git a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp index 75cff2b339b..2b33a85da29 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp" #include "gc/shared/gc_globals.hpp" @@ -61,9 +62,9 @@ void G1YoungGCAllocationFailureInjector::select_allocation_failure_regions() { bool G1YoungGCAllocationFailureInjector::arm_if_needed_for_gc_type(bool for_young_only_phase, bool during_concurrent_start, - bool mark_or_rebuild_in_progress) { + bool in_concurrent_cycle) { bool res = false; - if (mark_or_rebuild_in_progress) { + if (in_concurrent_cycle) { res |= G1GCAllocationFailureALotDuringConcMark; } if (during_concurrent_start) { @@ -89,14 +90,14 @@ void G1YoungGCAllocationFailureInjector::arm_if_needed() { // Now check if evacuation failure injection should be enabled for the current GC. G1CollectorState* collector_state = g1h->collector_state(); - const bool in_young_only_phase = collector_state->in_young_only_phase(); - const bool in_concurrent_start_gc = collector_state->in_concurrent_start_gc(); - const bool mark_or_rebuild_in_progress = collector_state->mark_or_rebuild_in_progress(); + const bool in_young_only_phase = collector_state->is_in_young_only_phase(); + const bool in_concurrent_start_gc = collector_state->is_in_concurrent_start_gc(); + const bool in_concurrent_cycle = collector_state->is_in_concurrent_cycle(); _inject_allocation_failure_for_current_gc &= arm_if_needed_for_gc_type(in_young_only_phase, in_concurrent_start_gc, - mark_or_rebuild_in_progress); + in_concurrent_cycle); if (_inject_allocation_failure_for_current_gc) { select_allocation_failure_regions(); diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index a0013d27172..14282383e29 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -29,7 
+29,7 @@ #include "gc/g1/g1CardTableEntryClosure.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSetCandidates.inline.hpp" -#include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMark.inline.hpp" #include "gc/g1/g1EvacFailureRegions.inline.hpp" #include "gc/g1/g1EvacInfo.hpp" @@ -395,7 +395,7 @@ public: { ResourceMark rm; bool allocated_after_mark_start = r->bottom() == _g1h->concurrent_mark()->top_at_mark_start(r); - bool mark_in_progress = _g1h->collector_state()->mark_in_progress(); + bool mark_in_progress = _g1h->collector_state()->is_in_marking(); guarantee(obj->is_typeArray() || (allocated_after_mark_start || !mark_in_progress), "Only eagerly reclaiming primitive arrays is supported, other humongous objects only if allocated after mark start, but the object " PTR_FORMAT " (%s) is not (mark %d allocated after mark: %d).", @@ -501,7 +501,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas // Concurrent mark does not mark through regions that we retain (they are root // regions wrt to marking), so we must clear their mark data (tams, bitmap, ...) // set eagerly or during evacuation failure. 
- bool clear_mark_data = !g1h->collector_state()->in_concurrent_start_gc() || + bool clear_mark_data = !g1h->collector_state()->is_in_concurrent_start_gc() || g1h->policy()->should_retain_evac_failed_region(r); if (clear_mark_data) { diff --git a/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp index b56e82fac3c..df6adeb8041 100644 --- a/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp +++ b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp @@ -70,7 +70,7 @@ JVMFlag::Error G1RemSetHowlMaxNumBucketsConstraintFunc(uint value, bool verbose) } if (!is_power_of_2(G1RemSetHowlMaxNumBuckets)) { JVMFlag::printError(verbose, - "G1RemSetMaxHowlNumBuckets (%u) must be a power of two.\n", + "G1RemSetHowlMaxNumBuckets (%u) must be a power of two.\n", value); return JVMFlag::VIOLATES_CONSTRAINT; } diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp index 4b8bca430f8..36de38c34bb 100644 --- a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp +++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp @@ -26,6 +26,7 @@ #define SHARE_GC_PARALLEL_MUTABLENUMASPACE_HPP #include "gc/parallel/mutableSpace.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/gcUtil.hpp" #include "runtime/globals.hpp" #include "utilities/growableArray.hpp" diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index f49419595e1..b77294a2ac1 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -272,7 +272,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(size_t size) { HeapWord* ParallelScavengeHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) { // Try young-gen first. 
- HeapWord* result = young_gen()->allocate(size); + HeapWord* result = young_gen()->cas_allocate(size); if (result != nullptr) { return result; } @@ -932,7 +932,7 @@ void ParallelScavengeHeap::resize_after_full_gc() { } HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) { - return _old_gen->allocate(size); + return _old_gen->cas_allocate_with_expansion(size); } void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) { diff --git a/src/hotspot/share/gc/parallel/parallel_globals.hpp b/src/hotspot/share/gc/parallel/parallel_globals.hpp index 64e2effdeae..ba4a79f9254 100644 --- a/src/hotspot/share/gc/parallel/parallel_globals.hpp +++ b/src/hotspot/share/gc/parallel/parallel_globals.hpp @@ -31,6 +31,19 @@ product_pd, \ range, \ constraint) \ + product(uintx, NUMAChunkResizeWeight, 20, \ + "Percentage (0-100) used to weight the current sample when " \ + "computing exponentially decaying average for " \ + "AdaptiveNUMAChunkSizing") \ + range(0, 100) \ + \ + product(size_t, NUMASpaceResizeRate, 1*G, \ + "Do not reallocate more than this amount per collection") \ + range(0, max_uintx) \ + \ + product(bool, UseAdaptiveNUMAChunkSizing, true, \ + "Enable adaptive chunk sizing for NUMA") \ + \ product(bool, UseMaximumCompactionOnSystemGC, true, \ "Use maximum compaction in the Parallel Old garbage collector " \ "for a system GC") diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.cpp b/src/hotspot/share/gc/parallel/psCompactionManager.cpp index b8ea47eeb09..048355bfad3 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ PreservedMarksSet* ParCompactionManager::_preserved_marks_set = nullptr; ParCompactionManager::ParCompactionManager(PreservedMarks* preserved_marks, ReferenceProcessor* ref_processor, uint parallel_gc_threads) - :_partial_array_splitter(_partial_array_state_manager, parallel_gc_threads, ObjArrayMarkingStride), + :_partial_array_splitter(_partial_array_state_manager, parallel_gc_threads), _mark_and_push_closure(this, ref_processor) { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); @@ -126,7 +126,7 @@ void ParCompactionManager::push_objArray(oop obj) { objArrayOop obj_array = objArrayOop(obj); size_t array_length = obj_array->length(); size_t initial_chunk_size = - _partial_array_splitter.start(&_marking_stack, obj_array, nullptr, array_length); + _partial_array_splitter.start(&_marking_stack, obj_array, nullptr, array_length, ObjArrayMarkingStride); follow_array(obj_array, 0, initial_chunk_size); } @@ -203,13 +203,13 @@ void ParCompactionManager::remove_all_shadow_regions() { #if TASKQUEUE_STATS void ParCompactionManager::print_and_reset_taskqueue_stats() { - marking_stacks()->print_and_reset_taskqueue_stats("Marking Stacks"); + marking_stacks()->print_and_reset_taskqueue_stats("Full GC"); auto get_pa_stats = [&](uint i) { return _manager_array[i]->partial_array_task_stats(); }; PartialArrayTaskStats::log_set(ParallelGCThreads, get_pa_stats, - "Partial Array Task Stats"); + "Full GC Partial Array"); uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers(); for (uint i = 0; i < parallel_gc_threads; ++i) { get_pa_stats(i)->reset(); diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index 7e3975036d4..c8e6ada3ebd 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -110,7 +110,7 @@ class PSOldGen : public CHeapObj { void shrink(size_t bytes); 
// Used by GC-workers during GC or for CDS at startup. - HeapWord* allocate(size_t word_size) { + HeapWord* cas_allocate_with_expansion(size_t word_size) { HeapWord* res; do { res = cas_allocate_noexpand(word_size); diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index d03bc3cda45..ca1fd2c120b 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -232,14 +232,10 @@ bool ParallelCompactData::initialize(MemRegion reserved_heap) assert(region_align_down(_heap_start) == _heap_start, "region start not aligned"); + assert(is_aligned(heap_size, RegionSize), "precondition"); - return initialize_region_data(heap_size); -} - -PSVirtualSpace* -ParallelCompactData::create_vspace(size_t count, size_t element_size) -{ - const size_t raw_bytes = count * element_size; + const size_t count = heap_size >> Log2RegionSize; + const size_t raw_bytes = count * sizeof(RegionData); const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10); const size_t granularity = os::vm_allocation_granularity(); const size_t rs_align = MAX2(page_sz, granularity); @@ -253,7 +249,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size) if (!rs.is_reserved()) { // Failed to reserve memory. - return nullptr; + return false; } os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(), @@ -261,34 +257,23 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size) MemTracker::record_virtual_memory_tag(rs, mtGC); - PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz); + PSVirtualSpace* region_vspace = new PSVirtualSpace(rs, page_sz); - if (!vspace->expand_by(_reserved_byte_size)) { + if (!region_vspace->expand_by(_reserved_byte_size)) { // Failed to commit memory. - delete vspace; + delete region_vspace; // Release memory reserved in the space. 
MemoryReserver::release(rs); - return nullptr; + return false; } - return vspace; -} - -bool ParallelCompactData::initialize_region_data(size_t heap_size) -{ - assert(is_aligned(heap_size, RegionSize), "precondition"); - - const size_t count = heap_size >> Log2RegionSize; - _region_vspace = create_vspace(count, sizeof(RegionData)); - if (_region_vspace != nullptr) { - _region_data = (RegionData*)_region_vspace->reserved_low_addr(); - _region_count = count; - return true; - } - return false; + _region_vspace = region_vspace; + _region_data = (RegionData*)_region_vspace->reserved_low_addr(); + _region_count = count; + return true; } void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) { diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp index f5ab041fa97..25f4f66de6f 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp @@ -395,9 +395,6 @@ public: #endif // #ifdef ASSERT private: - bool initialize_region_data(size_t heap_size); - PSVirtualSpace* create_vspace(size_t count, size_t element_size); - HeapWord* _heap_start; #ifdef ASSERT HeapWord* _heap_end; diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.cpp b/src/hotspot/share/gc/parallel/psPromotionManager.cpp index d6208755374..ac22430aa4c 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -138,13 +138,13 @@ bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) { #if TASKQUEUE_STATS void PSPromotionManager::print_and_reset_taskqueue_stats() { - stack_array_depth()->print_and_reset_taskqueue_stats("Oop Queue"); + stack_array_depth()->print_and_reset_taskqueue_stats("Young GC"); auto get_pa_stats = [&](uint i) { return manager_array(i)->partial_array_task_stats(); }; PartialArrayTaskStats::log_set(ParallelGCThreads, get_pa_stats, - "Partial Array Task Stats"); + "Young GC Partial Array"); for (uint i = 0; i < ParallelGCThreads; ++i) { get_pa_stats(i)->reset(); } @@ -158,7 +158,7 @@ PartialArrayTaskStats* PSPromotionManager::partial_array_task_stats() { // Most members are initialized either by initialize() or reset(). PSPromotionManager::PSPromotionManager() - : _partial_array_splitter(_partial_array_state_manager, ParallelGCThreads, ParGCArrayScanChunk) + : _partial_array_splitter(_partial_array_state_manager, ParallelGCThreads) { // We set the old lab's start array. _old_lab.set_start_array(old_gen()->start_array()); @@ -273,7 +273,7 @@ void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) { size_t array_length = to_array->length(); size_t initial_chunk_size = // The source array is unused when processing states. 
- _partial_array_splitter.start(&_claimed_stack_depth, nullptr, to_array, array_length); + _partial_array_splitter.start(&_claimed_stack_depth, nullptr, to_array, array_length, ParGCArrayScanChunk); process_array_chunk(to_array, 0, initial_chunk_size); } diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp index 9e904e44b22..68370a33a54 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp @@ -204,13 +204,13 @@ inline HeapWord* PSPromotionManager::allocate_in_old_gen(Klass* klass, // Do we allocate directly, or flush and refill? if (obj_size > (OldPLABSize / 2)) { // Allocate this object directly - result = old_gen()->allocate(obj_size); + result = old_gen()->cas_allocate_with_expansion(obj_size); promotion_trace_event(cast_to_oop(result), klass, obj_size, age, true, nullptr); } else { // Flush and fill _old_lab.flush(); - HeapWord* lab_base = old_gen()->allocate(OldPLABSize); + HeapWord* lab_base = old_gen()->cas_allocate_with_expansion(OldPLABSize); if (lab_base != nullptr) { _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); // Try the old lab allocation again. 
diff --git a/src/hotspot/share/gc/parallel/psYoungGen.hpp b/src/hotspot/share/gc/parallel/psYoungGen.hpp index 10aa037da98..ed10806ac99 100644 --- a/src/hotspot/share/gc/parallel/psYoungGen.hpp +++ b/src/hotspot/share/gc/parallel/psYoungGen.hpp @@ -128,7 +128,7 @@ class PSYoungGen : public CHeapObj { size_t max_gen_size() const { return _max_gen_size; } // Allocation - HeapWord* allocate(size_t word_size) { + HeapWord* cas_allocate(size_t word_size) { HeapWord* result = eden_space()->cas_allocate(word_size); return result; } diff --git a/src/hotspot/share/gc/serial/cSpaceCounters.cpp b/src/hotspot/share/gc/serial/cSpaceCounters.cpp deleted file mode 100644 index f6bcee99423..00000000000 --- a/src/hotspot/share/gc/serial/cSpaceCounters.cpp +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "gc/serial/cSpaceCounters.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/resourceArea.hpp" - -CSpaceCounters::CSpaceCounters(const char* name, int ordinal, size_t max_size, - ContiguousSpace* s, GenerationCounters* gc) - : _space(s) { - if (UsePerfData) { - EXCEPTION_MARK; - ResourceMark rm; - - const char* cns = PerfDataManager::name_space(gc->name_space(), "space", - ordinal); - - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); - strcpy(_name_space, cns); - - const char* cname = PerfDataManager::counter_name(_name_space, "name"); - PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK); - - cname = PerfDataManager::counter_name(_name_space, "maxCapacity"); - _max_capacity = PerfDataManager::create_variable(SUN_GC, cname, - PerfData::U_Bytes, - (jlong)max_size, - CHECK); - - cname = PerfDataManager::counter_name(_name_space, "capacity"); - _capacity = PerfDataManager::create_variable(SUN_GC, cname, - PerfData::U_Bytes, - _space->capacity(), - CHECK); - - cname = PerfDataManager::counter_name(_name_space, "used"); - _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, - _space->used(), - CHECK); - - cname = PerfDataManager::counter_name(_name_space, "initCapacity"); - PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, - _space->capacity(), CHECK); - } -} - -CSpaceCounters::~CSpaceCounters() { - FREE_C_HEAP_ARRAY(char, _name_space); -} - -void CSpaceCounters::update_capacity() { - _capacity->set_value(_space->capacity()); -} - -void CSpaceCounters::update_used() { - _used->set_value(_space->used()); -} - -void CSpaceCounters::update_all() { - update_used(); - update_capacity(); -} diff --git a/src/hotspot/share/gc/serial/cSpaceCounters.hpp b/src/hotspot/share/gc/serial/cSpaceCounters.hpp deleted file mode 100644 index 22a51cbbd20..00000000000 --- a/src/hotspot/share/gc/serial/cSpaceCounters.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2002, 2025, Oracle 
and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_GC_SERIAL_CSPACECOUNTERS_HPP -#define SHARE_GC_SERIAL_CSPACECOUNTERS_HPP - -#include "gc/shared/generationCounters.hpp" -#include "gc/shared/space.hpp" -#include "runtime/perfData.hpp" - -// A CSpaceCounters is a holder class for performance counters -// that track a space; - -class CSpaceCounters: public CHeapObj { - private: - PerfVariable* _capacity; - PerfVariable* _used; - PerfVariable* _max_capacity; - - // Constant PerfData types don't need to retain a reference. - // However, it's a good idea to document them here. 
- // PerfConstant* _size; - - ContiguousSpace* _space; - char* _name_space; - - public: - - CSpaceCounters(const char* name, int ordinal, size_t max_size, - ContiguousSpace* s, GenerationCounters* gc); - - ~CSpaceCounters(); - - void update_capacity(); - void update_used(); - void update_all(); - - const char* name_space() const { return _name_space; } -}; - -#endif // SHARE_GC_SERIAL_CSPACECOUNTERS_HPP diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp index 9ccc7b95529..ec3726d1dce 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.cpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp @@ -39,6 +39,7 @@ #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.inline.hpp" +#include "gc/shared/hSpaceCounters.hpp" #include "gc/shared/oopStorageSet.inline.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" @@ -248,12 +249,12 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs, min_size, max_size, _virtual_space.committed_size()); _gc_counters = new CollectorCounters(policy, 0); - _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space, - _gen_counters); - _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space, - _gen_counters); - _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, - _gen_counters); + _eden_counters = new HSpaceCounters(_gen_counters->name_space(), "eden", 0, + _max_eden_size, _eden_space->capacity()); + _from_counters = new HSpaceCounters(_gen_counters->name_space(), "s0", 1, + _max_survivor_size, _from_space->capacity()); + _to_counters = new HSpaceCounters(_gen_counters->name_space(), "s1", 2, + _max_survivor_size, _to_space->capacity()); update_counters(); _old_gen = nullptr; @@ -319,7 +320,7 @@ void DefNewGeneration::swap_spaces() { _to_space = s; if (UsePerfData) { - CSpaceCounters* c = _from_counters; + HSpaceCounters* c 
= _from_counters; _from_counters = _to_counters; _to_counters = c; } @@ -348,38 +349,6 @@ void DefNewGeneration::expand_eden_by(size_t delta_bytes) { post_resize(); } -size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const { - size_t thread_increase_size = 0; - // Check an overflow at 'threads_count * NewSizeThreadIncrease'. - if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) { - thread_increase_size = threads_count * NewSizeThreadIncrease; - } - return thread_increase_size; -} - -size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate, - size_t new_size_before, - size_t alignment, - size_t thread_increase_size) const { - size_t desired_new_size = new_size_before; - - if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) { - - // 1. Check an overflow at 'new_size_candidate + thread_increase_size'. - if (new_size_candidate <= max_uintx - thread_increase_size) { - new_size_candidate += thread_increase_size; - - // 2. Check an overflow at 'align_up'. - size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1)); - if (new_size_candidate <= aligned_max) { - desired_new_size = align_up(new_size_candidate, alignment); - } - } - } - - return desired_new_size; -} - size_t DefNewGeneration::calculate_desired_young_gen_bytes() const { size_t old_size = SerialHeap::heap()->old_gen()->capacity(); size_t new_size_before = _virtual_space.committed_size(); @@ -391,14 +360,8 @@ size_t DefNewGeneration::calculate_desired_young_gen_bytes() const { // All space sizes must be multiples of Generation::GenGrain. 
size_t alignment = Generation::GenGrain; - int threads_count = Threads::number_of_non_daemon_threads(); - size_t thread_increase_size = calculate_thread_increase_size(threads_count); - size_t new_size_candidate = old_size / NewRatio; - // Compute desired new generation size based on NewRatio and NewSizeThreadIncrease - // and reverts to previous value if any overflow happens - size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, - alignment, thread_increase_size); + size_t desired_new_size = align_up(new_size_candidate, alignment); // Adjust new generation size desired_new_size = clamp(desired_new_size, min_new_size, max_new_size); @@ -821,9 +784,9 @@ void DefNewGeneration::gc_epilogue() { void DefNewGeneration::update_counters() { if (UsePerfData) { - _eden_counters->update_all(); - _from_counters->update_all(); - _to_counters->update_all(); + _eden_counters->update_all(_eden_space->capacity(), _eden_space->used()); + _from_counters->update_all(_from_space->capacity(), _from_space->used()); + _to_counters->update_all(_to_space->capacity(), _to_space->used()); _gen_counters->update_capacity(_virtual_space.committed_size()); } } diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp index e0c7b6bba37..21241ec00ef 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.hpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_GC_SERIAL_DEFNEWGENERATION_HPP #define SHARE_GC_SERIAL_DEFNEWGENERATION_HPP -#include "gc/serial/cSpaceCounters.hpp" #include "gc/serial/generation.hpp" #include "gc/serial/tenuredGeneration.hpp" #include "gc/shared/ageTable.hpp" @@ -38,7 +37,7 @@ #include "utilities/stack.hpp" class ContiguousSpace; -class CSpaceCounters; +class HSpaceCounters; class OldGenScanClosure; class YoungGenScanClosure; class DefNewTracer; @@ -102,9 +101,9 @@ class DefNewGeneration: public Generation { // Performance Counters 
GenerationCounters* _gen_counters; - CSpaceCounters* _eden_counters; - CSpaceCounters* _from_counters; - CSpaceCounters* _to_counters; + HSpaceCounters* _eden_counters; + HSpaceCounters* _from_counters; + HSpaceCounters* _to_counters; // sizing information size_t _max_eden_size; @@ -230,15 +229,6 @@ class DefNewGeneration: public Generation { // Initialize eden/from/to spaces. void init_spaces(); - // Return adjusted new size for NewSizeThreadIncrease. - // If any overflow happens, revert to previous new size. - size_t adjust_for_thread_increase(size_t new_size_candidate, - size_t new_size_before, - size_t alignment, - size_t thread_increase_size) const; - - size_t calculate_thread_increase_size(int threads_count) const; - // Scavenge support void swap_spaces(); diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.cpp b/src/hotspot/share/gc/serial/tenuredGeneration.cpp index f68847ed1a6..95a996a98c1 100644 --- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp +++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp @@ -32,6 +32,7 @@ #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/genArguments.hpp" +#include "gc/shared/hSpaceCounters.hpp" #include "gc/shared/space.hpp" #include "gc/shared/spaceDecorator.hpp" #include "logging/log.hpp" @@ -330,9 +331,9 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs, _gc_counters = new CollectorCounters("Serial full collection pauses", 1); - _space_counters = new CSpaceCounters(gen_name, 0, + _space_counters = new HSpaceCounters(_gen_counters->name_space(), gen_name, 0, _virtual_space.reserved_size(), - _the_space, _gen_counters); + _the_space->capacity()); } void TenuredGeneration::gc_prologue() { @@ -367,7 +368,7 @@ void TenuredGeneration::update_promote_stats() { void TenuredGeneration::update_counters() { if (UsePerfData) { - _space_counters->update_all(); + _space_counters->update_all(_the_space->capacity(), _the_space->used()); 
_gen_counters->update_capacity(_virtual_space.committed_size()); } } diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.hpp b/src/hotspot/share/gc/serial/tenuredGeneration.hpp index ff73ab72b2c..1e3576d5ae7 100644 --- a/src/hotspot/share/gc/serial/tenuredGeneration.hpp +++ b/src/hotspot/share/gc/serial/tenuredGeneration.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_GC_SERIAL_TENUREDGENERATION_HPP #define SHARE_GC_SERIAL_TENUREDGENERATION_HPP -#include "gc/serial/cSpaceCounters.hpp" #include "gc/serial/generation.hpp" #include "gc/serial/serialBlockOffsetTable.hpp" #include "gc/shared/generationCounters.hpp" @@ -34,6 +33,7 @@ class CardTableRS; class ContiguousSpace; +class HSpaceCounters; // TenuredGeneration models the heap containing old (promoted/tenured) objects // contained in a single contiguous space. This generation is covered by a card @@ -68,7 +68,7 @@ class TenuredGeneration: public Generation { ContiguousSpace* _the_space; // Actual space holding objects GenerationCounters* _gen_counters; - CSpaceCounters* _space_counters; + HSpaceCounters* _space_counters; // Avg amount promoted; used for avoiding promotion undo // This class does not update deviations if the sample is zero. diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp index 53577bad1d8..afe7d2acfa7 100644 --- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -708,7 +708,6 @@ int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) { // 12 - 64-bit VM, compressed klass // 16 - 64-bit VM, normal klass if (base_off % BytesPerLong != 0) { - assert(UseCompressedClassPointers, ""); assert(!UseCompactObjectHeaders, ""); if (is_array) { // Exclude length to copy by 8 bytes words. @@ -758,8 +757,8 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi assert(UseTLAB, "Only for TLAB enabled allocations"); Node* thread = macro->transform_later(new ThreadLocalNode()); - Node* tlab_top_adr = macro->basic_plus_adr(macro->top()/*not oop*/, thread, in_bytes(JavaThread::tlab_top_offset())); - Node* tlab_end_adr = macro->basic_plus_adr(macro->top()/*not oop*/, thread, in_bytes(JavaThread::tlab_end_offset())); + Node* tlab_top_adr = macro->off_heap_plus_addr(thread, in_bytes(JavaThread::tlab_top_offset())); + Node* tlab_end_adr = macro->off_heap_plus_addr(thread, in_bytes(JavaThread::tlab_end_offset())); // Load TLAB end. 
// @@ -778,7 +777,7 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi macro->transform_later(old_tlab_top); // Add to heap top to get a new TLAB top - Node* new_tlab_top = new AddPNode(macro->top(), old_tlab_top, size_in_bytes); + Node* new_tlab_top = AddPNode::make_off_heap(old_tlab_top, size_in_bytes); macro->transform_later(new_tlab_top); // Check against TLAB end @@ -813,7 +812,10 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi return old_tlab_top; } -static const TypeFunc* clone_type() { +const TypeFunc* BarrierSetC2::_clone_type_Type = nullptr; + +void BarrierSetC2::make_clone_type() { + assert(BarrierSetC2::_clone_type_Type == nullptr, "should be"); // Create input type (domain) int argcnt = NOT_LP64(3) LP64_ONLY(4); const Type** const domain_fields = TypeTuple::fields(argcnt); @@ -829,7 +831,12 @@ static const TypeFunc* clone_type() { const Type** const range_fields = TypeTuple::fields(0); const TypeTuple* const range = TypeTuple::make(TypeFunc::Parms + 0, range_fields); - return TypeFunc::make(domain, range); + BarrierSetC2::_clone_type_Type = TypeFunc::make(domain, range); +} + +inline const TypeFunc* BarrierSetC2::clone_type() { + assert(BarrierSetC2::_clone_type_Type != nullptr, "should be initialized"); + return BarrierSetC2::_clone_type_Type; } #define XTOP LP64_ONLY(COMMA phase->top()) diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp index 7b9cb985cff..a486a88c48f 100644 --- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp @@ -270,6 +270,9 @@ public: // various GC barrier sets inherit from the BarrierSetC2 class to sprinkle // barriers into the accesses. 
class BarrierSetC2: public CHeapObj { +private: + static const TypeFunc* _clone_type_Type; + protected: virtual void resolve_address(C2Access& access) const; virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const; @@ -379,6 +382,9 @@ public: static int arraycopy_payload_base_offset(bool is_array); + static void make_clone_type(); + static const TypeFunc* clone_type(); + #ifndef PRODUCT virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const { st->print("%x", mach->barrier_data()); diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index f13c3ab7b6e..100866bb528 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -289,7 +289,7 @@ protected: DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == nullptr || is_in(p); }) void set_gc_cause(GCCause::Cause v); - GCCause::Cause gc_cause() { return _gc_cause; } + GCCause::Cause gc_cause() const { return _gc_cause; } oop obj_allocate(Klass* klass, size_t size, TRAPS); virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS); diff --git a/src/hotspot/share/gc/shared/concurrentGCThread.cpp b/src/hotspot/share/gc/shared/concurrentGCThread.cpp index ed6c1b4d283..c7765631cd9 100644 --- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp +++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp @@ -33,9 +33,8 @@ ConcurrentGCThread::ConcurrentGCThread() : _should_terminate(false), _has_terminated(false) {} -void ConcurrentGCThread::create_and_start(ThreadPriority prio) { +void ConcurrentGCThread::create_and_start() { if (os::create_thread(this, os::gc_thread)) { - os::set_priority(this, prio); os::start_thread(this); } } diff --git a/src/hotspot/share/gc/shared/concurrentGCThread.hpp b/src/hotspot/share/gc/shared/concurrentGCThread.hpp index 0c764546045..5322d676493 100644 --- a/src/hotspot/share/gc/shared/concurrentGCThread.hpp +++ 
b/src/hotspot/share/gc/shared/concurrentGCThread.hpp @@ -36,7 +36,7 @@ private: Atomic _has_terminated; protected: - void create_and_start(ThreadPriority prio = NearMaxPriority); + void create_and_start(); virtual void run_service() = 0; virtual void stop_service() = 0; diff --git a/src/hotspot/share/gc/shared/gcTrace.cpp b/src/hotspot/share/gc/shared/gcTrace.cpp index bad9c707b1e..5d0627e779e 100644 --- a/src/hotspot/share/gc/shared/gcTrace.cpp +++ b/src/hotspot/share/gc/shared/gcTrace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -122,11 +122,10 @@ void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& he void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const { send_meta_space_summary_event(when, summary); - send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary()); - if (UseCompressedClassPointers) { - send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary()); - } +#if INCLUDE_CLASS_SPACE + send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary()); +#endif } void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp index 65c970435e5..c9102944197 100644 --- a/src/hotspot/share/gc/shared/gc_globals.hpp +++ b/src/hotspot/share/gc/shared/gc_globals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -256,11 +256,11 @@ "before pushing a continuation entry") \ range(1, INT_MAX/2) \ \ - product_pd(bool, NeverActAsServerClassMachine, \ - "(Deprecated) Never act like a server-class machine") \ - \ - product(bool, AlwaysActAsServerClassMachine, false, \ - "(Deprecated) Always act like a server-class machine") \ + product(uintx, ArrayMarkingMinStride, 64, DIAGNOSTIC, \ + "Minimum chunk size for split array processing during marking; " \ + "the effective stride is clamped between this value " \ + "and ObjArrayMarkingStride.") \ + constraint(ArrayMarkingMinStrideConstraintFunc,AfterErgo) \ \ product(bool, AggressiveHeap, false, \ "(Deprecated) Optimize heap options for long-running memory " \ @@ -480,11 +480,6 @@ "Ratio of old/new generation sizes") \ range(0, max_uintx-1) \ \ - product_pd(size_t, NewSizeThreadIncrease, \ - "Additional size added to desired new generation size per " \ - "non-daemon thread (in bytes)") \ - range(0, max_uintx) \ - \ product(uintx, QueuedAllocationWarningCount, 0, \ "Number of times an allocation that queues behind a GC " \ "will retry before printing a warning") \ diff --git a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp index ea3d644d105..4d7ffce3a5d 100644 --- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp +++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -414,3 +414,15 @@ JVMFlag::Error GCCardSizeInBytesConstraintFunc(uint value, bool verbose) { return JVMFlag::SUCCESS; } } + +JVMFlag::Error ArrayMarkingMinStrideConstraintFunc(uintx value, bool verbose) { + if (value > ObjArrayMarkingStride) { + JVMFlag::printError(verbose, + "ArrayMarkingMinStride (%zu) must be " + "less than or equal to ObjArrayMarkingStride (%zu)\n", + value, ObjArrayMarkingStride); + return JVMFlag::VIOLATES_CONSTRAINT; + } else { + return JVMFlag::SUCCESS; + } +} diff --git a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp index a89f42959e1..1d2f45397aa 100644 --- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp +++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,8 @@ f(uintx, SurvivorRatioConstraintFunc) \ f(size_t, MetaspaceSizeConstraintFunc) \ f(size_t, MaxMetaspaceSizeConstraintFunc) \ - f(uint, GCCardSizeInBytesConstraintFunc) + f(uint, GCCardSizeInBytesConstraintFunc) \ + f(uintx, ArrayMarkingMinStrideConstraintFunc) SHARED_GC_CONSTRAINTS(DECLARE_CONSTRAINT) diff --git a/src/hotspot/share/gc/shared/partialArraySplitter.cpp b/src/hotspot/share/gc/shared/partialArraySplitter.cpp index d1833872683..04884d5e666 100644 --- a/src/hotspot/share/gc/shared/partialArraySplitter.cpp +++ b/src/hotspot/share/gc/shared/partialArraySplitter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,10 +28,9 @@ #include "utilities/macros.hpp" PartialArraySplitter::PartialArraySplitter(PartialArrayStateManager* manager, - uint num_workers, - size_t chunk_size) + uint num_workers) : _allocator(manager), - _stepper(num_workers, chunk_size) + _stepper(num_workers) TASKQUEUE_STATS_ONLY(COMMA _stats()) {} diff --git a/src/hotspot/share/gc/shared/partialArraySplitter.hpp b/src/hotspot/share/gc/shared/partialArraySplitter.hpp index 87cc137e797..340f370d1d5 100644 --- a/src/hotspot/share/gc/shared/partialArraySplitter.hpp +++ b/src/hotspot/share/gc/shared/partialArraySplitter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,8 +44,7 @@ class PartialArraySplitter { public: PartialArraySplitter(PartialArrayStateManager* manager, - uint num_workers, - size_t chunk_size); + uint num_workers); ~PartialArraySplitter() = default; NONCOPYABLE(PartialArraySplitter); @@ -60,6 +59,8 @@ public: // // length is their length in elements. // + // chunk_size the size of a single chunk. + // // If t is a ScannerTask, queue->push(t) must be a valid expression. The // result of that expression is ignored. // @@ -76,7 +77,8 @@ public: size_t start(Queue* queue, objArrayOop from_array, objArrayOop to_array, - size_t length); + size_t length, + size_t chunk_size); // Result type for claim(), carrying multiple values. Provides the claimed // chunk's start and end array indices. 
diff --git a/src/hotspot/share/gc/shared/partialArraySplitter.inline.hpp b/src/hotspot/share/gc/shared/partialArraySplitter.inline.hpp index abb0cf13101..7679358e218 100644 --- a/src/hotspot/share/gc/shared/partialArraySplitter.inline.hpp +++ b/src/hotspot/share/gc/shared/partialArraySplitter.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,14 +39,16 @@ template size_t PartialArraySplitter::start(Queue* queue, objArrayOop source, objArrayOop destination, - size_t length) { - PartialArrayTaskStepper::Step step = _stepper.start(length); + size_t length, + size_t chunk_size) { + precond(chunk_size > 0); + PartialArrayTaskStepper::Step step = _stepper.start(length, chunk_size); // Push initial partial scan tasks. if (step._ncreate > 0) { TASKQUEUE_STATS_ONLY(_stats.inc_split();); TASKQUEUE_STATS_ONLY(_stats.inc_pushed(step._ncreate);) PartialArrayState* state = - _allocator.allocate(source, destination, step._index, length, step._ncreate); + _allocator.allocate(source, destination, step._index, length, chunk_size, step._ncreate); for (uint i = 0; i < step._ncreate; ++i) { queue->push(ScannerTask(state)); } @@ -75,9 +77,10 @@ PartialArraySplitter::claim(PartialArrayState* state, Queue* queue, bool stolen) queue->push(ScannerTask(state)); } } + size_t chunk_size = state->chunk_size(); // Release state, decrementing refcount, now that we're done with it. 
_allocator.release(state); - return Claim{step._index, step._index + _stepper.chunk_size()}; + return Claim{step._index, step._index + chunk_size}; } #endif // SHARE_GC_SHARED_PARTIALARRAYSPLITTER_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp index aadbc46b7c1..d3b21c2fdaa 100644 --- a/src/hotspot/share/gc/shared/partialArrayState.cpp +++ b/src/hotspot/share/gc/shared/partialArrayState.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,10 +35,12 @@ PartialArrayState::PartialArrayState(oop src, oop dst, size_t index, size_t length, + size_t chunk_size, size_t initial_refcount) : _source(src), _destination(dst), _length(length), + _chunk_size(chunk_size), _index(index), _refcount(initial_refcount) { @@ -77,6 +79,7 @@ PartialArrayStateAllocator::~PartialArrayStateAllocator() { PartialArrayState* PartialArrayStateAllocator::allocate(oop src, oop dst, size_t index, size_t length, + size_t chunk_size, size_t initial_refcount) { void* p; FreeListEntry* head = _free_list; @@ -87,7 +90,7 @@ PartialArrayState* PartialArrayStateAllocator::allocate(oop src, oop dst, head->~FreeListEntry(); p = head; } - return ::new (p) PartialArrayState(src, dst, index, length, initial_refcount); + return ::new (p) PartialArrayState(src, dst, index, length, chunk_size, initial_refcount); } void PartialArrayStateAllocator::release(PartialArrayState* state) { diff --git a/src/hotspot/share/gc/shared/partialArrayState.hpp b/src/hotspot/share/gc/shared/partialArrayState.hpp index 3dafeb0f14c..75e297526ae 100644 --- a/src/hotspot/share/gc/shared/partialArrayState.hpp +++ b/src/hotspot/share/gc/shared/partialArrayState.hpp @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,7 @@ class PartialArrayState { oop _source; oop _destination; size_t _length; + size_t _chunk_size; Atomic _index; Atomic _refcount; @@ -68,7 +69,7 @@ class PartialArrayState { PartialArrayState(oop src, oop dst, size_t index, size_t length, - size_t initial_refcount); + size_t chunk_size, size_t initial_refcount); public: // Deleted to require management by allocator object. @@ -89,6 +90,8 @@ public: // The length of the array oop. size_t length() const { return _length; } + size_t chunk_size() const { return _chunk_size; } + // A pointer to the start index for the next segment to process, for atomic // update. Atomic* index_addr() { return &_index; } @@ -130,6 +133,7 @@ public: // from the associated manager. PartialArrayState* allocate(oop src, oop dst, size_t index, size_t length, + size_t chunk_size, size_t initial_refcount); // Decrement the state's refcount. 
If the new refcount is zero, add the diff --git a/src/hotspot/share/gc/shared/partialArrayTaskStats.cpp b/src/hotspot/share/gc/shared/partialArrayTaskStats.cpp index ac8a380ec9a..090430963c6 100644 --- a/src/hotspot/share/gc/shared/partialArrayTaskStats.cpp +++ b/src/hotspot/share/gc/shared/partialArrayTaskStats.cpp @@ -64,7 +64,7 @@ static const char* const stats_hdr[] = { }; void PartialArrayTaskStats::print_header(outputStream* s, const char* title) { - s->print_cr("%s:", title); + s->print_cr("GC Task Stats %s", title); for (uint i = 0; i < ARRAY_SIZE(stats_hdr); ++i) { s->print_cr("%s", stats_hdr[i]); } diff --git a/src/hotspot/share/gc/shared/partialArrayTaskStepper.cpp b/src/hotspot/share/gc/shared/partialArrayTaskStepper.cpp index d91ba347d6c..f7d53c9348a 100644 --- a/src/hotspot/share/gc/shared/partialArrayTaskStepper.cpp +++ b/src/hotspot/share/gc/shared/partialArrayTaskStepper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,8 +48,7 @@ static uint compute_task_fanout(uint task_limit) { return result; } -PartialArrayTaskStepper::PartialArrayTaskStepper(uint n_workers, size_t chunk_size) : - _chunk_size(chunk_size), +PartialArrayTaskStepper::PartialArrayTaskStepper(uint n_workers) : _task_limit(compute_task_limit(n_workers)), _task_fanout(compute_task_fanout(_task_limit)) {} diff --git a/src/hotspot/share/gc/shared/partialArrayTaskStepper.hpp b/src/hotspot/share/gc/shared/partialArrayTaskStepper.hpp index 11499ca2ffe..594cc7b245a 100644 --- a/src/hotspot/share/gc/shared/partialArrayTaskStepper.hpp +++ b/src/hotspot/share/gc/shared/partialArrayTaskStepper.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,19 +40,19 @@ class PartialArrayState; // substantially expand the task queues. class PartialArrayTaskStepper { public: - PartialArrayTaskStepper(uint n_workers, size_t chunk_size); + PartialArrayTaskStepper(uint n_workers); struct Step { size_t _index; // Array index for the step. uint _ncreate; // Number of new tasks to create. }; - // Called with the length of the array to be processed. Returns a Step with - // _index being the end of the initial chunk, which the caller should - // process. This is also the starting index for the next chunk to process. + // Called with the length of the array to be processed and chunk size. + // Returns a Step with _index being the end of the initial chunk, which the + // caller should process. This is also the starting index for the next chunk to process. // The _ncreate is the number of tasks to enqueue to continue processing the // array. If _ncreate is zero then _index will be length. - inline Step start(size_t length) const; + inline Step start(size_t length, size_t chunk_size) const; // Atomically increment state's index by chunk_size() to claim the next // chunk. Returns a Step with _index being the starting index of the @@ -60,21 +60,16 @@ public: // to enqueue. inline Step next(PartialArrayState* state) const; - // The size of chunks to claim for each task. - inline size_t chunk_size() const; - class TestSupport; // For unit tests private: - // Size (number of elements) of a chunk to process. - size_t _chunk_size; // Limit on the number of partial array tasks to create for a given array. uint _task_limit; // Maximum number of new tasks to create when processing an existing task. uint _task_fanout; // For unit tests. 
- inline Step next_impl(size_t length, Atomic* index_addr) const; + inline Step next_impl(size_t length, size_t chunk_size, Atomic* index_addr) const; }; #endif // SHARE_GC_SHARED_PARTIALARRAYTASKSTEPPER_HPP diff --git a/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp b/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp index 6946f7c69ff..538815698f2 100644 --- a/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp +++ b/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,13 +31,9 @@ #include "utilities/checkedCast.hpp" #include "utilities/debug.hpp" -size_t PartialArrayTaskStepper::chunk_size() const { - return _chunk_size; -} - PartialArrayTaskStepper::Step -PartialArrayTaskStepper::start(size_t length) const { - size_t end = length % _chunk_size; // End of initial chunk. +PartialArrayTaskStepper::start(size_t length, size_t chunk_size) const { + size_t end = length % chunk_size; // End of initial chunk. // If the initial chunk is the complete array, then don't need any partial // tasks. Otherwise, start with just one partial task; see new task // calculation in next(). @@ -45,24 +41,24 @@ PartialArrayTaskStepper::start(size_t length) const { } PartialArrayTaskStepper::Step -PartialArrayTaskStepper::next_impl(size_t length, Atomic* index_addr) const { +PartialArrayTaskStepper::next_impl(size_t length, size_t chunk_size, Atomic* index_addr) const { // The start of the next task is in the state's index. // Atomically increment by the chunk size to claim the associated chunk. 
// Because we limit the number of enqueued tasks to being no more than the // number of remaining chunks to process, we can use an atomic add for the // claim, rather than a CAS loop. - size_t start = index_addr->fetch_then_add(_chunk_size, memory_order_relaxed); + size_t start = index_addr->fetch_then_add(chunk_size, memory_order_relaxed); assert(start < length, "invariant: start %zu, length %zu", start, length); - assert(((length - start) % _chunk_size) == 0, + assert(((length - start) % chunk_size) == 0, "invariant: start %zu, length %zu, chunk size %zu", - start, length, _chunk_size); + start, length, chunk_size); // Determine the number of new tasks to create. // Zero-based index for this partial task. The initial task isn't counted. - uint task_num = checked_cast(start / _chunk_size); + uint task_num = checked_cast(start / chunk_size); // Number of tasks left to process, including this one. - uint remaining_tasks = checked_cast((length - start) / _chunk_size); + uint remaining_tasks = checked_cast((length - start) / chunk_size); assert(remaining_tasks > 0, "invariant"); // Compute number of pending tasks, including this one. The maximum number // of tasks is a function of task_num (N) and _task_fanout (F). @@ -89,7 +85,7 @@ PartialArrayTaskStepper::next_impl(size_t length, Atomic* index_addr) co PartialArrayTaskStepper::Step PartialArrayTaskStepper::next(PartialArrayState* state) const { - return next_impl(state->length(), state->index_addr()); + return next_impl(state->length(), state->chunk_size(), state->index_addr()); } #endif // SHARE_GC_SHARED_PARTIALARRAYTASKSTEPPER_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/taskqueue.inline.hpp b/src/hotspot/share/gc/shared/taskqueue.inline.hpp index e77645f4fcf..b142aadc580 100644 --- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -279,13 +279,11 @@ typename GenericTaskQueue::PopResult GenericTaskQueue::pop_g // Increment top; if it wraps, also increment tag, to distinguish it // from any recent _age for the same top() index. idx_t new_top = increment_index(oldAge.top()); + // Don't use bottom, since a pop_local might have decremented it. + assert_not_underflow(localBot, new_top); idx_t new_tag = oldAge.tag() + ((new_top == 0) ? 1 : 0); Age newAge(new_top, new_tag); bool result = par_set_age(oldAge, newAge); - - // Note that using "bottom" here might fail, since a pop_local might - // have decremented it. - assert_not_underflow(localBot, newAge.top()); return result ? PopResult::Success : PopResult::Contended; } diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp index 61cf73fe04a..f9b8694eb04 100644 --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -37,8 +37,7 @@ #include "utilities/copy.hpp" size_t ThreadLocalAllocBuffer::_max_size = 0; -int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0; -unsigned int ThreadLocalAllocBuffer::_target_refills = 0; +unsigned int ThreadLocalAllocBuffer::_target_num_refills = 0; ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() : _start(nullptr), @@ -49,10 +48,10 @@ ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() : _desired_size(0), _refill_waste_limit(0), _allocated_before_last_gc(0), - _number_of_refills(0), + _num_refills(0), _refill_waste(0), _gc_waste(0), - _slow_allocations(0), + _num_slow_allocations(0), _allocated_size(0), _allocation_fraction(TLABAllocationWeight) { @@ -82,7 +81,7 @@ void ThreadLocalAllocBuffer::accumulate_and_reset_statistics(ThreadLocalAllocSta 
print_stats("gc"); - if (_number_of_refills > 0) { + if (_num_refills > 0) { // Update allocation history if a reasonable amount of eden was allocated. bool update_allocation_history = used > 0.5 * capacity; @@ -99,16 +98,16 @@ void ThreadLocalAllocBuffer::accumulate_and_reset_statistics(ThreadLocalAllocSta _allocation_fraction.sample(alloc_frac); } - stats->update_fast_allocations(_number_of_refills, + stats->update_fast_allocations(_num_refills, _allocated_size, _gc_waste, _refill_waste); } else { - assert(_number_of_refills == 0 && _refill_waste == 0 && _gc_waste == 0, + assert(_num_refills == 0 && _refill_waste == 0 && _gc_waste == 0, "tlab stats == 0"); } - stats->update_slow_allocations(_slow_allocations); + stats->update_num_slow_allocations(_num_slow_allocations); reset_statistics(); } @@ -148,7 +147,7 @@ void ThreadLocalAllocBuffer::resize() { assert(ResizeTLAB, "Should not call this otherwise"); size_t alloc = (size_t)(_allocation_fraction.average() * (Universe::heap()->tlab_capacity() / HeapWordSize)); - size_t new_size = alloc / _target_refills; + size_t new_size = alloc / _target_num_refills; new_size = clamp(new_size, min_size(), max_size()); @@ -157,24 +156,24 @@ void ThreadLocalAllocBuffer::resize() { log_trace(gc, tlab)("TLAB new size: thread: " PTR_FORMAT " [id: %2d]" " refills %d alloc: %8.6f desired_size: %zu -> %zu", p2i(thread()), thread()->osthread()->thread_id(), - _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size); + _target_num_refills, _allocation_fraction.average(), desired_size(), aligned_new_size); set_desired_size(aligned_new_size); set_refill_waste_limit(initial_refill_waste_limit()); } void ThreadLocalAllocBuffer::reset_statistics() { - _number_of_refills = 0; - _refill_waste = 0; - _gc_waste = 0; - _slow_allocations = 0; - _allocated_size = 0; + _num_refills = 0; + _refill_waste = 0; + _gc_waste = 0; + _num_slow_allocations = 0; + _allocated_size = 0; } void ThreadLocalAllocBuffer::fill(HeapWord* 
start, HeapWord* top, size_t new_size) { - _number_of_refills++; + _num_refills++; _allocated_size += new_size; print_stats("fill"); assert(top <= start + new_size - alignment_reserve(), "size too small"); @@ -206,7 +205,7 @@ void ThreadLocalAllocBuffer::initialize() { size_t capacity = Universe::heap()->tlab_capacity() / HeapWordSize; if (capacity > 0) { // Keep alloc_frac as float and not double to avoid the double to float conversion - float alloc_frac = desired_size() * target_refills() / (float)capacity; + float alloc_frac = desired_size() * target_num_refills() / (float)capacity; _allocation_fraction.sample(alloc_frac); } @@ -220,34 +219,10 @@ void ThreadLocalAllocBuffer::startup_initialization() { // Assuming each thread's active tlab is, on average, // 1/2 full at a GC - _target_refills = 100 / (2 * TLABWasteTargetPercent); - // We need to set initial target refills to 2 to avoid a GC which causes VM + _target_num_refills = 100 / (2 * TLABWasteTargetPercent); + // We need to set the initial target number of refills to 2 to avoid a GC which causes VM // abort during VM initialization. - _target_refills = MAX2(_target_refills, 2U); - -#ifdef COMPILER2 - // If the C2 compiler is present, extra space is needed at the end of - // TLABs, otherwise prefetching instructions generated by the C2 - // compiler will fault (due to accessing memory outside of heap). - // The amount of space is the max of the number of lines to - // prefetch for array and for instance allocations. (Extra space must be - // reserved to accommodate both types of allocations.) - // - // Only SPARC-specific BIS instructions are known to fault. (Those - // instructions are generated if AllocatePrefetchStyle==3 and - // AllocatePrefetchInstr==1). To be on the safe side, however, - // extra space is reserved for all combinations of - // AllocatePrefetchStyle and AllocatePrefetchInstr. - // - // If the C2 compiler is not present, no space is reserved. 
- - // +1 for rounding up to next cache line, +1 to be safe - if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) { - int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2; - _reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) / - (int)HeapWordSize; - } -#endif + _target_num_refills = MAX2(_target_num_refills, 2U); // During jvm startup, the main thread is initialized // before the heap is initialized. So reinitialize it now. @@ -265,10 +240,10 @@ size_t ThreadLocalAllocBuffer::initial_desired_size() { init_sz = TLABSize / HeapWordSize; } else { // Initial size is a function of the average number of allocating threads. - unsigned int nof_threads = ThreadLocalAllocStats::allocating_threads_avg(); + unsigned int num_threads = ThreadLocalAllocStats::num_allocating_threads_avg(); init_sz = (Universe::heap()->tlab_capacity() / HeapWordSize) / - (nof_threads * target_refills()); + (num_threads * target_num_refills()); init_sz = align_object_size(init_sz); } // We can't use clamp() between min_size() and max_size() here because some @@ -296,10 +271,10 @@ void ThreadLocalAllocBuffer::print_stats(const char* tag) { " slow: %dB", tag, p2i(thrd), thrd->osthread()->thread_id(), _desired_size / (K / HeapWordSize), - _slow_allocations, _refill_waste_limit * HeapWordSize, + _num_slow_allocations, _refill_waste_limit * HeapWordSize, _allocation_fraction.average(), _allocation_fraction.average() * tlab_used / K, - _number_of_refills, waste_percent, + _num_refills, waste_percent, _gc_waste * HeapWordSize, _refill_waste * HeapWordSize); } @@ -324,17 +299,17 @@ HeapWord* ThreadLocalAllocBuffer::hard_end() { return _allocation_end + alignment_reserve(); } -PerfVariable* ThreadLocalAllocStats::_perf_allocating_threads; -PerfVariable* ThreadLocalAllocStats::_perf_total_refills; -PerfVariable* ThreadLocalAllocStats::_perf_max_refills; -PerfVariable* ThreadLocalAllocStats::_perf_total_allocations; +PerfVariable* 
ThreadLocalAllocStats::_perf_num_allocating_threads; +PerfVariable* ThreadLocalAllocStats::_perf_total_num_refills; +PerfVariable* ThreadLocalAllocStats::_perf_max_num_refills; +PerfVariable* ThreadLocalAllocStats::_perf_total_allocated_size; PerfVariable* ThreadLocalAllocStats::_perf_total_gc_waste; PerfVariable* ThreadLocalAllocStats::_perf_max_gc_waste; PerfVariable* ThreadLocalAllocStats::_perf_total_refill_waste; PerfVariable* ThreadLocalAllocStats::_perf_max_refill_waste; -PerfVariable* ThreadLocalAllocStats::_perf_total_slow_allocations; -PerfVariable* ThreadLocalAllocStats::_perf_max_slow_allocations; -AdaptiveWeightedAverage ThreadLocalAllocStats::_allocating_threads_avg(0); +PerfVariable* ThreadLocalAllocStats::_perf_total_num_slow_allocations; +PerfVariable* ThreadLocalAllocStats::_perf_max_num_slow_allocations; +AdaptiveWeightedAverage ThreadLocalAllocStats::_num_allocating_threads_avg(0); static PerfVariable* create_perf_variable(const char* name, PerfData::Units unit, TRAPS) { ResourceMark rm; @@ -342,120 +317,119 @@ static PerfVariable* create_perf_variable(const char* name, PerfData::Units unit } void ThreadLocalAllocStats::initialize() { - _allocating_threads_avg = AdaptiveWeightedAverage(TLABAllocationWeight); - _allocating_threads_avg.sample(1); // One allocating thread at startup + _num_allocating_threads_avg = AdaptiveWeightedAverage(TLABAllocationWeight); + _num_allocating_threads_avg.sample(1); // One allocating thread at startup if (UsePerfData) { EXCEPTION_MARK; - _perf_allocating_threads = create_perf_variable("allocThreads", PerfData::U_None, CHECK); - _perf_total_refills = create_perf_variable("fills", PerfData::U_None, CHECK); - _perf_max_refills = create_perf_variable("maxFills", PerfData::U_None, CHECK); - _perf_total_allocations = create_perf_variable("alloc", PerfData::U_Bytes, CHECK); - _perf_total_gc_waste = create_perf_variable("gcWaste", PerfData::U_Bytes, CHECK); - _perf_max_gc_waste = create_perf_variable("maxGcWaste", 
PerfData::U_Bytes, CHECK); - _perf_total_refill_waste = create_perf_variable("refillWaste", PerfData::U_Bytes, CHECK); - _perf_max_refill_waste = create_perf_variable("maxRefillWaste", PerfData::U_Bytes, CHECK); - _perf_total_slow_allocations = create_perf_variable("slowAlloc", PerfData::U_None, CHECK); - _perf_max_slow_allocations = create_perf_variable("maxSlowAlloc", PerfData::U_None, CHECK); + _perf_num_allocating_threads = create_perf_variable("allocThreads", PerfData::U_None, CHECK); + _perf_total_num_refills = create_perf_variable("fills", PerfData::U_None, CHECK); + _perf_max_num_refills = create_perf_variable("maxFills", PerfData::U_None, CHECK); + _perf_total_allocated_size = create_perf_variable("alloc", PerfData::U_Bytes, CHECK); + _perf_total_gc_waste = create_perf_variable("gcWaste", PerfData::U_Bytes, CHECK); + _perf_max_gc_waste = create_perf_variable("maxGcWaste", PerfData::U_Bytes, CHECK); + _perf_total_refill_waste = create_perf_variable("refillWaste", PerfData::U_Bytes, CHECK); + _perf_max_refill_waste = create_perf_variable("maxRefillWaste", PerfData::U_Bytes, CHECK); + _perf_total_num_slow_allocations = create_perf_variable("slowAlloc", PerfData::U_None, CHECK); + _perf_max_num_slow_allocations = create_perf_variable("maxSlowAlloc", PerfData::U_None, CHECK); } } ThreadLocalAllocStats::ThreadLocalAllocStats() : - _allocating_threads(0), - _total_refills(0), - _max_refills(0), - _total_allocations(0), + _num_allocating_threads(0), + _total_num_refills(0), + _max_num_refills(0), + _total_allocated_size(0), _total_gc_waste(0), _max_gc_waste(0), _total_refill_waste(0), _max_refill_waste(0), - _total_slow_allocations(0), - _max_slow_allocations(0) {} + _total_num_slow_allocations(0), + _max_num_slow_allocations(0) {} -unsigned int ThreadLocalAllocStats::allocating_threads_avg() { - return MAX2((unsigned int)(_allocating_threads_avg.average() + 0.5), 1U); +unsigned int ThreadLocalAllocStats::num_allocating_threads_avg() { + return MAX2((unsigned 
int)(_num_allocating_threads_avg.average() + 0.5), 1U); } -void ThreadLocalAllocStats::update_fast_allocations(unsigned int refills, - size_t allocations, - size_t gc_waste, - size_t refill_waste) { - _allocating_threads += 1; - _total_refills += refills; - _max_refills = MAX2(_max_refills, refills); - _total_allocations += allocations; +void ThreadLocalAllocStats::update_fast_allocations(unsigned int num_refills, + size_t allocated_size, + size_t gc_waste, + size_t refill_waste) { + _num_allocating_threads += 1; + _total_num_refills += num_refills; + _max_num_refills = MAX2(_max_num_refills, num_refills); + _total_allocated_size += allocated_size; _total_gc_waste += gc_waste; _max_gc_waste = MAX2(_max_gc_waste, gc_waste); _total_refill_waste += refill_waste; _max_refill_waste = MAX2(_max_refill_waste, refill_waste); } -void ThreadLocalAllocStats::update_slow_allocations(unsigned int allocations) { - _total_slow_allocations += allocations; - _max_slow_allocations = MAX2(_max_slow_allocations, allocations); +void ThreadLocalAllocStats::update_num_slow_allocations(unsigned int num_slow_allocations) { + _total_num_slow_allocations += num_slow_allocations; + _max_num_slow_allocations = MAX2(_max_num_slow_allocations, num_slow_allocations); } void ThreadLocalAllocStats::update(const ThreadLocalAllocStats& other) { - _allocating_threads += other._allocating_threads; - _total_refills += other._total_refills; - _max_refills = MAX2(_max_refills, other._max_refills); - _total_allocations += other._total_allocations; - _total_gc_waste += other._total_gc_waste; - _max_gc_waste = MAX2(_max_gc_waste, other._max_gc_waste); - _total_refill_waste += other._total_refill_waste; - _max_refill_waste = MAX2(_max_refill_waste, other._max_refill_waste); - _total_slow_allocations += other._total_slow_allocations; - _max_slow_allocations = MAX2(_max_slow_allocations, other._max_slow_allocations); + _num_allocating_threads += other._num_allocating_threads; + _total_num_refills += 
other._total_num_refills; + _max_num_refills = MAX2(_max_num_refills, other._max_num_refills); + _total_allocated_size += other._total_allocated_size; + _total_gc_waste += other._total_gc_waste; + _max_gc_waste = MAX2(_max_gc_waste, other._max_gc_waste); + _total_refill_waste += other._total_refill_waste; + _max_refill_waste = MAX2(_max_refill_waste, other._max_refill_waste); + _total_num_slow_allocations += other._total_num_slow_allocations; + _max_num_slow_allocations = MAX2(_max_num_slow_allocations, other._max_num_slow_allocations); } void ThreadLocalAllocStats::reset() { - _allocating_threads = 0; - _total_refills = 0; - _max_refills = 0; - _total_allocations = 0; - _total_gc_waste = 0; - _max_gc_waste = 0; - _total_refill_waste = 0; - _max_refill_waste = 0; - _total_slow_allocations = 0; - _max_slow_allocations = 0; + _num_allocating_threads = 0; + _total_num_refills = 0; + _max_num_refills = 0; + _total_allocated_size = 0; + _total_gc_waste = 0; + _max_gc_waste = 0; + _total_refill_waste = 0; + _max_refill_waste = 0; + _total_num_slow_allocations = 0; + _max_num_slow_allocations = 0; } void ThreadLocalAllocStats::publish() { - if (_total_allocations == 0) { + if (_total_allocated_size == 0) { return; } - _allocating_threads_avg.sample(_allocating_threads); + _num_allocating_threads_avg.sample(_num_allocating_threads); const size_t waste = _total_gc_waste + _total_refill_waste; - const double waste_percent = percent_of(waste, _total_allocations); + const double waste_percent = percent_of(waste, _total_allocated_size); log_debug(gc, tlab)("TLAB totals: thrds: %d refills: %d max: %d" " slow allocs: %d max %d waste: %4.1f%%" " gc: %zuB max: %zuB" " slow: %zuB max: %zuB", - _allocating_threads, _total_refills, _max_refills, - _total_slow_allocations, _max_slow_allocations, waste_percent, + _num_allocating_threads, _total_num_refills, _max_num_refills, + _total_num_slow_allocations, _max_num_slow_allocations, waste_percent, _total_gc_waste * HeapWordSize, 
_max_gc_waste * HeapWordSize, _total_refill_waste * HeapWordSize, _max_refill_waste * HeapWordSize); if (UsePerfData) { - _perf_allocating_threads ->set_value(_allocating_threads); - _perf_total_refills ->set_value(_total_refills); - _perf_max_refills ->set_value(_max_refills); - _perf_total_allocations ->set_value(_total_allocations); - _perf_total_gc_waste ->set_value(_total_gc_waste); - _perf_max_gc_waste ->set_value(_max_gc_waste); - _perf_total_refill_waste ->set_value(_total_refill_waste); - _perf_max_refill_waste ->set_value(_max_refill_waste); - _perf_total_slow_allocations ->set_value(_total_slow_allocations); - _perf_max_slow_allocations ->set_value(_max_slow_allocations); + _perf_num_allocating_threads ->set_value(_num_allocating_threads); + _perf_total_num_refills ->set_value(_total_num_refills); + _perf_max_num_refills ->set_value(_max_num_refills); + _perf_total_allocated_size ->set_value(_total_allocated_size); + _perf_total_gc_waste ->set_value(_total_gc_waste); + _perf_max_gc_waste ->set_value(_max_gc_waste); + _perf_total_refill_waste ->set_value(_total_refill_waste); + _perf_max_refill_waste ->set_value(_max_refill_waste); + _perf_total_num_slow_allocations ->set_value(_total_num_slow_allocations); + _perf_max_num_slow_allocations ->set_value(_max_num_slow_allocations); } } size_t ThreadLocalAllocBuffer::end_reserve() { - size_t reserve_size = CollectedHeap::lab_alignment_reserve(); - return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch); + return CollectedHeap::lab_alignment_reserve(); } size_t ThreadLocalAllocBuffer::estimated_used_bytes() const { diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp index 25d9bf00eac..67bc149013e 100644 --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp @@ -56,14 +56,13 @@ private: size_t _refill_waste_limit; // hold onto tlab if free() is larger than this 
uint64_t _allocated_before_last_gc; // total bytes allocated up until the last gc - static size_t _max_size; // maximum size of any TLAB - static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB - static unsigned _target_refills; // expected number of refills between GCs + static size_t _max_size; // maximum size of any TLAB + static unsigned _target_num_refills; // expected number of refills between GCs - unsigned _number_of_refills; + unsigned _num_refills; unsigned _refill_waste; unsigned _gc_waste; - unsigned _slow_allocations; + unsigned _num_slow_allocations; size_t _allocated_size; AdaptiveWeightedAverage _allocation_fraction; // fraction of eden allocated in tlabs @@ -80,7 +79,7 @@ private: size_t initial_refill_waste_limit(); - static int target_refills() { return _target_refills; } + static int target_num_refills() { return _target_num_refills; } size_t initial_desired_size(); size_t remaining(); @@ -99,9 +98,9 @@ private: // statistics - int number_of_refills() const { return _number_of_refills; } - int gc_waste() const { return _gc_waste; } - int slow_allocations() const { return _slow_allocations; } + int num_refills() const { return _num_refills; } + int gc_waste() const { return _gc_waste; } + int num_slow_allocations() const { return _num_slow_allocations; } public: ThreadLocalAllocBuffer(); @@ -180,41 +179,41 @@ public: class ThreadLocalAllocStats : public StackObj { private: - static PerfVariable* _perf_allocating_threads; - static PerfVariable* _perf_total_refills; - static PerfVariable* _perf_max_refills; - static PerfVariable* _perf_total_allocations; + static PerfVariable* _perf_num_allocating_threads; + static PerfVariable* _perf_total_num_refills; + static PerfVariable* _perf_max_num_refills; + static PerfVariable* _perf_total_allocated_size; static PerfVariable* _perf_total_gc_waste; static PerfVariable* _perf_max_gc_waste; static PerfVariable* _perf_total_refill_waste; static PerfVariable* _perf_max_refill_waste; - 
static PerfVariable* _perf_total_slow_allocations; - static PerfVariable* _perf_max_slow_allocations; + static PerfVariable* _perf_total_num_slow_allocations; + static PerfVariable* _perf_max_num_slow_allocations; - static AdaptiveWeightedAverage _allocating_threads_avg; + static AdaptiveWeightedAverage _num_allocating_threads_avg; - unsigned int _allocating_threads; - unsigned int _total_refills; - unsigned int _max_refills; - size_t _total_allocations; + unsigned int _num_allocating_threads; + unsigned int _total_num_refills; + unsigned int _max_num_refills; + size_t _total_allocated_size; size_t _total_gc_waste; size_t _max_gc_waste; size_t _total_refill_waste; size_t _max_refill_waste; - unsigned int _total_slow_allocations; - unsigned int _max_slow_allocations; + unsigned int _total_num_slow_allocations; + unsigned int _max_num_slow_allocations; public: static void initialize(); - static unsigned int allocating_threads_avg(); + static unsigned int num_allocating_threads_avg(); ThreadLocalAllocStats(); - void update_fast_allocations(unsigned int refills, - size_t allocations, + void update_fast_allocations(unsigned int num_refills, + size_t allocated_size, size_t gc_waste, size_t refill_waste); - void update_slow_allocations(unsigned int allocations); + void update_num_slow_allocations(unsigned int num_slow_allocations); void update(const ThreadLocalAllocStats& other); void reset(); diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp index 441686c5c4c..727467f98d0 100644 --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp @@ -82,7 +82,7 @@ void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) { set_refill_waste_limit(refill_waste_limit() + refill_waste_limit_increment()); - _slow_allocations++; + _num_slow_allocations++; log_develop_trace(gc, tlab)("TLAB: %s thread: " PTR_FORMAT 
" [id: %2d]" " obj: %zu" diff --git a/src/hotspot/share/gc/shared/workerThread.cpp b/src/hotspot/share/gc/shared/workerThread.cpp index 2f6f003608f..2738c98e5c3 100644 --- a/src/hotspot/share/gc/shared/workerThread.cpp +++ b/src/hotspot/share/gc/shared/workerThread.cpp @@ -210,8 +210,6 @@ WorkerThread::WorkerThread(const char* name_prefix, uint name_suffix, WorkerTask } void WorkerThread::run() { - os::set_priority(this, NearMaxPriority); - while (true) { _dispatcher->worker_run_task(); } diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp index fdfde866cd7..f721c3cd001 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved. + * Copyright (c) 2018, 2026, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -519,7 +519,33 @@ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit, #undef __ -const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() { +const TypeFunc* ShenandoahBarrierSetC2::_write_barrier_pre_Type = nullptr; +const TypeFunc* ShenandoahBarrierSetC2::_clone_barrier_Type = nullptr; +const TypeFunc* ShenandoahBarrierSetC2::_load_reference_barrier_Type = nullptr; + +inline const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() { + assert(ShenandoahBarrierSetC2::_write_barrier_pre_Type != nullptr, "should be initialized"); + return ShenandoahBarrierSetC2::_write_barrier_pre_Type; +} + +inline const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() { + assert(ShenandoahBarrierSetC2::_clone_barrier_Type != nullptr, "should be initialized"); + return ShenandoahBarrierSetC2::_clone_barrier_Type; +} + +const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() { + assert(ShenandoahBarrierSetC2::_load_reference_barrier_Type != nullptr, "should be initialized"); + return ShenandoahBarrierSetC2::_load_reference_barrier_Type; +} + +void ShenandoahBarrierSetC2::init() { + ShenandoahBarrierSetC2::make_write_barrier_pre_Type(); + ShenandoahBarrierSetC2::make_clone_barrier_Type(); + ShenandoahBarrierSetC2::make_load_reference_barrier_Type(); +} + +void ShenandoahBarrierSetC2::make_write_barrier_pre_Type() { + assert(ShenandoahBarrierSetC2::_write_barrier_pre_Type == nullptr, "should be"); const Type **fields = TypeTuple::fields(1); fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); @@ -528,10 +554,11 @@ const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() { fields = TypeTuple::fields(0); const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); - return TypeFunc::make(domain, range); + ShenandoahBarrierSetC2::_write_barrier_pre_Type = TypeFunc::make(domain, range); } -const TypeFunc* 
ShenandoahBarrierSetC2::clone_barrier_Type() { +void ShenandoahBarrierSetC2::make_clone_barrier_Type() { + assert(ShenandoahBarrierSetC2::_clone_barrier_Type == nullptr, "should be"); const Type **fields = TypeTuple::fields(1); fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); @@ -540,10 +567,11 @@ const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() { fields = TypeTuple::fields(0); const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); - return TypeFunc::make(domain, range); + ShenandoahBarrierSetC2::_clone_barrier_Type = TypeFunc::make(domain, range); } -const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() { +void ShenandoahBarrierSetC2::make_load_reference_barrier_Type() { + assert(ShenandoahBarrierSetC2::_load_reference_barrier_Type == nullptr, "should be"); const Type **fields = TypeTuple::fields(2); fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM; // original field value fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address @@ -555,7 +583,7 @@ const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() { fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM; const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); - return TypeFunc::make(domain, range); + ShenandoahBarrierSetC2::_load_reference_barrier_Type = TypeFunc::make(domain, range); } Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const { @@ -890,7 +918,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo Node* thread = phase->transform_later(new ThreadLocalNode()); Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset)); + Node* gc_state_addr = phase->transform_later(AddPNode::make_off_heap(thread, offset)); uint gc_state_idx = Compile::AliasIdxRaw; 
const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp index dd9e9bcc1a5..108eaa0998b 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp @@ -82,6 +82,13 @@ private: static bool clone_needs_barrier(Node* src, PhaseGVN& gvn); + static const TypeFunc* _write_barrier_pre_Type; + static const TypeFunc* _clone_barrier_Type; + static const TypeFunc* _load_reference_barrier_Type; + static void make_write_barrier_pre_Type(); + static void make_clone_barrier_Type(); + static void make_load_reference_barrier_Type(); + protected: virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const; @@ -106,6 +113,8 @@ public: static const TypeFunc* write_barrier_pre_Type(); static const TypeFunc* clone_barrier_Type(); static const TypeFunc* load_reference_barrier_Type(); + static void init(); + virtual bool has_load_barrier_nodes() const { return true; } // This is the entry-point for the backend to perform accesses through the Access API. diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp index 40fe0c00490..015276feb5c 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved. + * Copyright (c) 2015, 2026, Red Hat, Inc. All rights reserved. * Copyright (C) 2022, Tencent. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -871,7 +871,7 @@ void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node* Node* thread = new ThreadLocalNode(); Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset); + Node* gc_state_addr = AddPNode::make_off_heap(thread, gc_state_offset); Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr, DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr), TypeInt::BYTE, MemNode::unordered); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp index 7a8bd55c795..c595d1fd9cd 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp @@ -33,6 +33,7 @@ #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" #include "runtime/globals.hpp" @@ -59,14 +60,95 @@ const double ShenandoahAdaptiveHeuristics::HIGHEST_EXPECTED_AVAILABLE_AT_END = 0 const double ShenandoahAdaptiveHeuristics::MINIMUM_CONFIDENCE = 0.319; // 25% const double ShenandoahAdaptiveHeuristics::MAXIMUM_CONFIDENCE = 3.291; // 99.9% + +// To enable detection of GC time trends, we keep separate track of the recent history of gc time. During initialization, +// for example, the amount of live memory may be increasing, which is likely to cause the GC times to increase. This history +// allows us to predict increasing GC times rather than always assuming average recent GC time is the best predictor. 
+const size_t ShenandoahAdaptiveHeuristics::GC_TIME_SAMPLE_SIZE = 3; + +// We also keep separate track of recently sampled allocation rates for two purposes: +// 1. The number of samples examined to determine acceleration of allocation is represented by +// ShenandoahRateAccelerationSampleSize +// 2. The number of most recent samples averaged to determine a momentary allocation spike is represented by +// ShenandoahMomentaryAllocationRateSpikeSampleSize + +// Allocation rates are sampled by the regulator thread, which typically runs every ms. There may be jitter in the scheduling +// of the regulator thread. To reduce signal noise and synchronization overhead, we do not sample allocation rate with every +// iteration of the regulator. We prefer sample time longer than 1 ms so that there can be a statistically significant number +// of allocations occurring within each sample period. The regulator thread samples allocation rate only if at least +// ShenandoahAccelerationSamplePeriod ms have passed since it previously sampled the allocation rate. +// +// This trigger responds much more quickly than the traditional trigger, which monitors 100 ms spans. When acceleration is +// detected, the impact of acceleration on anticipated consumption of available memory is also much more impactful +// than the assumed constant allocation rate consumption of available memory.
+ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo* space_info) : ShenandoahHeuristics(space_info), _margin_of_error_sd(ShenandoahAdaptiveInitialConfidence), _spike_threshold_sd(ShenandoahAdaptiveInitialSpikeThreshold), _last_trigger(OTHER), - _available(Moving_Average_Samples, ShenandoahAdaptiveDecayFactor) { } + _available(Moving_Average_Samples, ShenandoahAdaptiveDecayFactor), + _free_set(nullptr), + _previous_acceleration_sample_timestamp(0.0), + _gc_time_first_sample_index(0), + _gc_time_num_samples(0), + _gc_time_timestamps(NEW_C_HEAP_ARRAY(double, GC_TIME_SAMPLE_SIZE, mtGC)), + _gc_time_samples(NEW_C_HEAP_ARRAY(double, GC_TIME_SAMPLE_SIZE, mtGC)), + _gc_time_xy(NEW_C_HEAP_ARRAY(double, GC_TIME_SAMPLE_SIZE, mtGC)), + _gc_time_xx(NEW_C_HEAP_ARRAY(double, GC_TIME_SAMPLE_SIZE, mtGC)), + _gc_time_sum_of_timestamps(0), + _gc_time_sum_of_samples(0), + _gc_time_sum_of_xy(0), + _gc_time_sum_of_xx(0), + _gc_time_m(0.0), + _gc_time_b(0.0), + _gc_time_sd(0.0), + _spike_acceleration_buffer_size(MAX2(ShenandoahRateAccelerationSampleSize, 1+ShenandoahMomentaryAllocationRateSpikeSampleSize)), + _spike_acceleration_first_sample_index(0), + _spike_acceleration_num_samples(0), + _spike_acceleration_rate_samples(NEW_C_HEAP_ARRAY(double, _spike_acceleration_buffer_size, mtGC)), + _spike_acceleration_rate_timestamps(NEW_C_HEAP_ARRAY(double, _spike_acceleration_buffer_size, mtGC)) { + } -ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {} +ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() { + FREE_C_HEAP_ARRAY(double, _spike_acceleration_rate_samples); + FREE_C_HEAP_ARRAY(double, _spike_acceleration_rate_timestamps); + FREE_C_HEAP_ARRAY(double, _gc_time_timestamps); + FREE_C_HEAP_ARRAY(double, _gc_time_samples); + FREE_C_HEAP_ARRAY(double, _gc_time_xy); + FREE_C_HEAP_ARRAY(double, _gc_time_xx); +} + +void ShenandoahAdaptiveHeuristics::initialize() { + ShenandoahHeuristics::initialize(); +} + +void 
ShenandoahAdaptiveHeuristics::post_initialize() { + ShenandoahHeuristics::post_initialize(); + _free_set = ShenandoahHeap::heap()->free_set(); + assert(!ShenandoahHeap::heap()->mode()->is_generational(), "ShenandoahGenerationalHeuristics overrides this method"); + compute_headroom_adjustment(); +} + +void ShenandoahAdaptiveHeuristics::compute_headroom_adjustment() { + // The trigger threshold represents mutator available - "head room". + // We plan for GC to finish before the amount of allocated memory exceeds trigger threshold. This is the same as saying we + // intend to finish GC before the amount of available memory is less than the allocation headroom. Headroom is the planned + // safety buffer to allow a small amount of additional allocation to take place in case we were overly optimistic in delaying + // our trigger. + size_t capacity = ShenandoahHeap::heap()->soft_max_capacity(); + size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor; + size_t penalties = capacity / 100 * _gc_time_penalties; + _headroom_adjustment = spike_headroom + penalties; +} + +void ShenandoahAdaptiveHeuristics::start_idle_span() { + compute_headroom_adjustment(); +} + +void ShenandoahAdaptiveHeuristics::adjust_penalty(intx step) { + ShenandoahHeuristics::adjust_penalty(step); +} void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, RegionData* data, size_t size, @@ -76,8 +158,8 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand // The logic for cset selection in adaptive is as follows: // // 1. We cannot get cset larger than available free space. Otherwise we guarantee OOME - // during evacuation, and thus guarantee full GC. In practice, we also want to let - // application to allocate something. This is why we limit CSet to some fraction of + // during evacuation, and thus guarantee full GC. In practice, we also want to let the + // application allocate during concurrent GC. 
This is why we limit CSet to some fraction of // available space. In non-overloaded heap, max_cset would contain all plausible candidates // over garbage threshold. // @@ -108,6 +190,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand size_t cur_cset = 0; size_t cur_garbage = 0; + // Regions are sorted in order of decreasing garbage for (size_t idx = 0; idx < size; idx++) { ShenandoahHeapRegion* r = data[idx].get_region(); @@ -126,6 +209,96 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand } } +void ShenandoahAdaptiveHeuristics::add_degenerated_gc_time(double time_at_start, double gc_time) { + // Conservatively add sample into linear model If this time is above the predicted concurrent gc time + if (predict_gc_time(time_at_start) < gc_time) { + add_gc_time(time_at_start, gc_time); + } +} + +void ShenandoahAdaptiveHeuristics::add_gc_time(double time_at_start, double gc_time) { + // Update best-fit linear predictor of GC time + uint index = (_gc_time_first_sample_index + _gc_time_num_samples) % GC_TIME_SAMPLE_SIZE; + if (_gc_time_num_samples == GC_TIME_SAMPLE_SIZE) { + _gc_time_sum_of_timestamps -= _gc_time_timestamps[index]; + _gc_time_sum_of_samples -= _gc_time_samples[index]; + _gc_time_sum_of_xy -= _gc_time_xy[index]; + _gc_time_sum_of_xx -= _gc_time_xx[index]; + } + _gc_time_timestamps[index] = time_at_start; + _gc_time_samples[index] = gc_time; + _gc_time_xy[index] = time_at_start * gc_time; + _gc_time_xx[index] = time_at_start * time_at_start; + + _gc_time_sum_of_timestamps += _gc_time_timestamps[index]; + _gc_time_sum_of_samples += _gc_time_samples[index]; + _gc_time_sum_of_xy += _gc_time_xy[index]; + _gc_time_sum_of_xx += _gc_time_xx[index]; + + if (_gc_time_num_samples < GC_TIME_SAMPLE_SIZE) { + _gc_time_num_samples++; + } else { + _gc_time_first_sample_index = (_gc_time_first_sample_index + 1) % GC_TIME_SAMPLE_SIZE; + } + + if (_gc_time_num_samples == 1) { + // The predictor is constant 
(horizontal line) + _gc_time_m = 0; + _gc_time_b = gc_time; + _gc_time_sd = 0.0; + } else if (_gc_time_num_samples == 2) { + + assert(time_at_start > _gc_time_timestamps[_gc_time_first_sample_index], + "Two GC cycles cannot finish at same time: %.6f vs %.6f, with GC times %.6f and %.6f", time_at_start, + _gc_time_timestamps[_gc_time_first_sample_index], gc_time, _gc_time_samples[_gc_time_first_sample_index]); + + // Two points define a line + double delta_x = time_at_start - _gc_time_timestamps[_gc_time_first_sample_index]; + double delta_y = gc_time - _gc_time_samples[_gc_time_first_sample_index]; + _gc_time_m = delta_y / delta_x; + // y = mx + b + // so b = y0 - mx0 + _gc_time_b = gc_time - _gc_time_m * time_at_start; + _gc_time_sd = 0.0; + } else { + // Since timestamps are monotonically increasing, denominator does not equal zero. + double denominator = _gc_time_num_samples * _gc_time_sum_of_xx - _gc_time_sum_of_timestamps * _gc_time_sum_of_timestamps; + assert(denominator != 0.0, "Invariant: samples: %u, sum_of_xx: %.6f, sum_of_timestamps: %.6f", + _gc_time_num_samples, _gc_time_sum_of_xx, _gc_time_sum_of_timestamps); + _gc_time_m = ((_gc_time_num_samples * _gc_time_sum_of_xy - _gc_time_sum_of_timestamps * _gc_time_sum_of_samples) / + denominator); + _gc_time_b = (_gc_time_sum_of_samples - _gc_time_m * _gc_time_sum_of_timestamps) / _gc_time_num_samples; + double sum_of_squared_deviations = 0.0; + for (size_t i = 0; i < _gc_time_num_samples; i++) { + uint index = (_gc_time_first_sample_index + i) % GC_TIME_SAMPLE_SIZE; + double x = _gc_time_timestamps[index]; + double predicted_y = _gc_time_m * x + _gc_time_b; + double deviation = predicted_y - _gc_time_samples[index]; + sum_of_squared_deviations += deviation * deviation; + } + _gc_time_sd = sqrt(sum_of_squared_deviations / _gc_time_num_samples); + } +} + +double ShenandoahAdaptiveHeuristics::predict_gc_time(double timestamp_at_start) { + return _gc_time_m * timestamp_at_start + _gc_time_b + _gc_time_sd * 
_margin_of_error_sd; +} + +void ShenandoahAdaptiveHeuristics::add_rate_to_acceleration_history(double timestamp, double rate) { + uint new_sample_index = + (_spike_acceleration_first_sample_index + _spike_acceleration_num_samples) % _spike_acceleration_buffer_size; + _spike_acceleration_rate_timestamps[new_sample_index] = timestamp; + _spike_acceleration_rate_samples[new_sample_index] = rate; + if (_spike_acceleration_num_samples == _spike_acceleration_buffer_size) { + _spike_acceleration_first_sample_index++; + if (_spike_acceleration_first_sample_index == _spike_acceleration_buffer_size) { + _spike_acceleration_first_sample_index = 0; + } + } else { + _spike_acceleration_num_samples++; + } +} + void ShenandoahAdaptiveHeuristics::record_cycle_start() { ShenandoahHeuristics::record_cycle_start(); _allocation_rate.allocation_counter_reset(); @@ -133,6 +306,10 @@ void ShenandoahAdaptiveHeuristics::record_cycle_start() { void ShenandoahAdaptiveHeuristics::record_success_concurrent() { ShenandoahHeuristics::record_success_concurrent(); + double now = os::elapsedTime(); + + // Should we not add GC time if this was an abbreviated cycle? + add_gc_time(_cycle_start, elapsed_cycle_time()); size_t available = _space_info->available(); @@ -185,6 +362,7 @@ void ShenandoahAdaptiveHeuristics::record_success_concurrent() { void ShenandoahAdaptiveHeuristics::record_degenerated() { ShenandoahHeuristics::record_degenerated(); + add_degenerated_gc_time(_precursor_cycle_start, elapsed_degenerated_cycle_time()); // Adjust both trigger's parameters in the case of a degenerated GC because // either of them should have triggered earlier to avoid this case. 
adjust_margin_of_error(DEGENERATE_PENALTY_SD); @@ -236,6 +414,24 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { size_t available = _space_info->soft_mutator_available(); size_t allocated = _space_info->bytes_allocated_since_gc_start(); + double avg_cycle_time = 0; + double avg_alloc_rate = 0; + double now = get_most_recent_wake_time(); + size_t allocatable_words = this->allocatable(available); + double predicted_future_accelerated_gc_time = 0.0; + size_t allocated_bytes_since_last_sample = 0; + double instantaneous_rate_words_per_second = 0.0; + size_t consumption_accelerated = 0; + double acceleration = 0.0; + double current_rate_by_acceleration = 0.0; + size_t min_threshold = min_free_threshold(); + double predicted_future_gc_time = 0; + double future_planned_gc_time = 0; + bool future_planned_gc_time_is_average = false; + double avg_time_to_deplete_available = 0.0; + bool is_spiking = false; + double spike_time_to_deplete_available = 0.0; + log_debug(gc, ergo)("should_start_gc calculation: available: " PROPERFMT ", soft_max_capacity: " PROPERFMT ", " "allocated_since_gc_start: " PROPERFMT, PROPERFMTARGS(available), PROPERFMTARGS(capacity), PROPERFMTARGS(allocated)); @@ -250,7 +446,6 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { _last_trigger = OTHER; - size_t min_threshold = min_free_threshold(); if (available < min_threshold) { log_trigger("Free (Soft) (" PROPERFMT ") is below minimum threshold (" PROPERFMT ")", PROPERFMTARGS(available), PROPERFMTARGS(min_threshold)); @@ -271,55 +466,227 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { return true; } } - // Check if allocation headroom is still okay. This also factors in: - // 1. Some space to absorb allocation spikes (ShenandoahAllocSpikeFactor) - // 2. 
Accumulated penalties from Degenerated and Full GC - size_t allocation_headroom = available; - size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor; - size_t penalties = capacity / 100 * _gc_time_penalties; + // The test (3 * allocated > available) below is intended to prevent triggers from firing so quickly that there + // has not been sufficient time to create garbage that can be reclaimed during the triggered GC cycle. If we trigger before + // garbage has been created, the concurrent GC will find no garbage. This has been observed to result in degens which + // experience OOM during evac or that experience "bad progress", both of which escalate to Full GC. Note that garbage that + // was allocated following the start of the current GC cycle cannot be reclaimed in this GC cycle. Here is the derivation + // of the expression: + // + // Let R (runway) represent the total amount of memory that can be allocated following the start of GC(N). The runway + // represents memory available at the start of the current GC plus garbage reclaimed by the current GC. In a balanced, + // fully utilized configuration, we will be starting each new GC cycle immediately following completion of the preceding + // GC cycle. In this configuration, we would expect half of R to be consumed during concurrent cycle GC(N) and half + // to be consumed during concurrent GC(N+1). + // + // Assume we want to delay GC trigger until: A/V > 0.33 + // This is equivalent to enforcing that: A > 0.33V + // which is: 3A > V + // Since A+V equals R, we have: A + 3A > A + V = R + // which is to say that: A > R/4 + // + // Postponing the trigger until at least 1/4 of the runway has been consumed helps to improve the efficiency of the + // triggered GC. 
Under heavy steady state workload, this delay condition generally has no effect: if the allocation + // runway is divided "equally" between the current GC and the next GC, then at any potential trigger point (which cannot + // happen any sooner than completion of the first GC), it is already the case that roughly A > R/2. + if (3 * allocated <= available) { + // Even though we will not issue an adaptive trigger unless a minimum threshold of memory has been allocated, + // we still allow more generic triggers, such as guaranteed GC intervals, to act. + return ShenandoahHeuristics::should_start_gc(); + } - allocation_headroom -= MIN2(allocation_headroom, spike_headroom); - allocation_headroom -= MIN2(allocation_headroom, penalties); + avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd()); + avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd); + if ((now - _previous_acceleration_sample_timestamp) >= (ShenandoahAccelerationSamplePeriod / 1000.0)) { + predicted_future_accelerated_gc_time = + predict_gc_time(now + MAX2(get_planned_sleep_interval(), ShenandoahAccelerationSamplePeriod / 1000.0)); + double future_accelerated_planned_gc_time; + bool future_accelerated_planned_gc_time_is_average; + if (predicted_future_accelerated_gc_time > avg_cycle_time) { + future_accelerated_planned_gc_time = predicted_future_accelerated_gc_time; + future_accelerated_planned_gc_time_is_average = false; + } else { + future_accelerated_planned_gc_time = avg_cycle_time; + future_accelerated_planned_gc_time_is_average = true; + } + allocated_bytes_since_last_sample = _free_set->get_bytes_allocated_since_previous_sample(); + instantaneous_rate_words_per_second = + (allocated_bytes_since_last_sample / HeapWordSize) / (now - _previous_acceleration_sample_timestamp); - double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd()); - double avg_alloc_rate = 
_allocation_rate.upper_bound(_margin_of_error_sd); + _previous_acceleration_sample_timestamp = now; + add_rate_to_acceleration_history(now, instantaneous_rate_words_per_second); + current_rate_by_acceleration = instantaneous_rate_words_per_second; + consumption_accelerated = + accelerated_consumption(acceleration, current_rate_by_acceleration, avg_alloc_rate / HeapWordSize, + (ShenandoahAccelerationSamplePeriod / 1000.0) + future_accelerated_planned_gc_time); - log_debug(gc)("average GC time: %.2f ms, allocation rate: %.0f %s/s", - avg_cycle_time * 1000, byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate)); - if (avg_cycle_time * avg_alloc_rate > allocation_headroom) { - log_trigger("Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s)" - " to deplete free headroom (%zu%s) (margin of error = %.2f)", - avg_cycle_time * 1000, - byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), - _margin_of_error_sd); - log_info(gc, ergo)("Free headroom: %zu%s (free) - %zu%s (spike) - %zu%s (penalties) = %zu%s", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), - byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), - byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); + // Note that even a single thread that wakes up and begins to allocate excessively can manifest as accelerating allocation + // rate. This thread will initially allocate a TLAB of minimum size. Then it will allocate a TLAB twice as big a bit later, + // and then twice as big again after another short delay. 
When a phase change causes many threads to increase their + // allocation behavior, this effect is multiplied, and compounded by jitter in the times that individual threads experience + // the phase change. + // + // The following trace represents an actual workload, with allocation rates sampled at 10 Hz, the default behavior before + // introduction of accelerated allocation rate detection. Though the allocation rate is seen to be increasing at times + // 101.907 and 102.007 and 102.108, the newly sampled allocation rate is not enough to trigger GC because the headroom is + // still quite large. In fact, GC is not triggered until time 102.409s, and this GC degenerates. + // + // Sample Time (s) Allocation Rate (MB/s) Headroom (GB) + // 101.807 0.0 26.93 + // <--- accelerated spike can trigger here, around time 101.9s + // 101.907 477.6 26.85 + // 102.007 3,206.0 26.35 + // 102.108 23,797.8 24.19 + // 102.208 24,164.5 21.83 + // 102.309 23,965.0 19.47 + // 102.409 24,624.35 17.05 <--- without accelerated rate detection, we trigger here + // + // Though the above measurements are from actual workload, the following details regarding sampled allocation rates at 3ms + // period were not measured directly for this run-time sample. These are hypothetical, though they represent a plausible + // result that correlates with the actual measurements. + // + // For most of the 100 ms time span that precedes the sample at 101.907, the allocation rate still remains at zero. The phase + // change that causes increasing allocations occurs near the end of this time segment. When sampled with a 3 ms period, + // acceleration of allocation can be triggered at approximately time 101.88s. + // + // In the default configuration, accelerated allocation rate is detected by examining a sequence of 8 allocation rate samples. + // + // Even a single allocation rate sample above the norm can be interpreted as acceleration of allocation rate.
For example, the + // best-fit line for the following samples has an acceleration rate of 3,553.3 MB/s/s. This is not enough to trigger GC, + // especially given the abundance of Headroom at this moment in time. + // + // TimeStamp (s) Alloc rate (MB/s) + // 101.857 0 + // 101.860 0 + // 101.863 0 + // 101.866 0 + // 101.869 53.3 + // + // At the next sample time, we will compute a slightly higher acceleration, 9,150 MB/s/s. This is also insufficient to trigger + // GC. + // + // TimeStamp (s) Alloc rate (MB/s) + // 101.860 0 + // 101.863 0 + // 101.866 0 + // 101.869 53.3 + // 101.872 110.6 + // + // Eventually, we will observe a full history of accelerating rate samples, computing acceleration of 18,500 MB/s/s. This will + // trigger GC over 500 ms earlier than was previously possible. + // + // TimeStamp (s) Alloc rate (MB/s) + // 101.866 0 + // 101.869 53.3 + // 101.872 110.6 + // 101.875 165.9 + // 101.878 221.2 + // + // The accelerated rate heuristic is based on the following idea: + // + // Assume allocation rate is accelerating at a constant rate. If we postpone the spike trigger until the subsequent + // sample point, will there be enough memory to satisfy allocations that occur during the anticipated concurrent GC + // cycle? If not, we should trigger right now. + // + // Outline of this heuristic triggering technique: + // + // 1. We remember the N (e.g. N=3) most recent samples of spike allocation rate r0, r1, r2 samples at t0, t1, and t2 + // 2. if r1 < r0 or r2 < r1, approximate Acceleration = 0.0, Rate = Average(r0, r1, r2) + // 3. Otherwise, use least squares method to compute best-fit line of rate vs time + // 4. The slope of this line represents Acceleration. The y-intercept of this line represents "initial rate" + // 5. Use r2 to represent CurrentRate + // 6. Use Consumption = CurrentRate * GCTime + 1/2 * Acceleration * GCTime * GCTime + // (See High School physics discussions on constant acceleration: D = v0 * t + 1/2 * a * t^2) + // 7.
if Consumption exceeds headroom, trigger now + // + // Though larger sample size may improve quality of predictor, it also delays trigger response. Smaller sample sizes + // are more susceptible to false triggers based on random noise. The default configuration uses a sample size of 8 and + // a sample period of roughly 15 ms, spanning approximately 120 ms of execution. + if (consumption_accelerated > allocatable_words) { + size_t size_t_alloc_rate = (size_t) current_rate_by_acceleration * HeapWordSize; + if (acceleration > 0) { + size_t size_t_acceleration = (size_t) acceleration * HeapWordSize; + log_trigger("Accelerated consumption (" PROPERFMT ") exceeds free headroom (" PROPERFMT ") at " + "current rate (" PROPERFMT "/s) with acceleration (" PROPERFMT "/s/s) for planned %s GC time (%.2f ms)", + PROPERFMTARGS(consumption_accelerated * HeapWordSize), + PROPERFMTARGS(allocatable_words * HeapWordSize), + PROPERFMTARGS(size_t_alloc_rate), + PROPERFMTARGS(size_t_acceleration), + future_accelerated_planned_gc_time_is_average? "(from average)": "(by linear prediction)", + future_accelerated_planned_gc_time * 1000); + } else { + log_trigger("Momentary spike consumption (" PROPERFMT ") exceeds free headroom (" PROPERFMT ") at " + "current rate (" PROPERFMT "/s) for planned %s GC time (%.2f ms) (spike threshold = %.2f)", + PROPERFMTARGS(consumption_accelerated * HeapWordSize), + PROPERFMTARGS(allocatable_words * HeapWordSize), + PROPERFMTARGS(size_t_alloc_rate), + future_accelerated_planned_gc_time_is_average? "(from average)": "(by linear prediction)", + future_accelerated_planned_gc_time * 1000, _spike_threshold_sd); + + + } + _spike_acceleration_num_samples = 0; + _spike_acceleration_first_sample_index = 0; + + // Count this as a form of RATE trigger for purposes of adjusting heuristic triggering configuration because this + // trigger is influenced more by margin_of_error_sd than by spike_threshold_sd. 
+ accept_trigger_with_type(RATE); + return true; + } + } + + // Suppose we don't trigger now, but decide to trigger in the next regulator cycle. What will be the GC time then? + predicted_future_gc_time = predict_gc_time(now + get_planned_sleep_interval()); + if (predicted_future_gc_time > avg_cycle_time) { + future_planned_gc_time = predicted_future_gc_time; + future_planned_gc_time_is_average = false; + } else { + future_planned_gc_time = avg_cycle_time; + future_planned_gc_time_is_average = true; + } + + log_debug(gc)("%s: average GC time: %.2f ms, predicted GC time: %.2f ms, allocation rate: %.0f %s/s", + _space_info->name(), avg_cycle_time * 1000, predicted_future_gc_time * 1000, + byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate)); + size_t allocatable_bytes = allocatable_words * HeapWordSize; + avg_time_to_deplete_available = allocatable_bytes / avg_alloc_rate; + + if (future_planned_gc_time > avg_time_to_deplete_available) { + log_trigger("%s GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s)" + " to deplete free headroom (%zu%s) (margin of error = %.2f)", + future_planned_gc_time_is_average? 
"Average": "Linear prediction of", future_planned_gc_time * 1000, + byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), + byte_size_in_proper_unit(allocatable_bytes), proper_unit_for_byte_size(allocatable_bytes), + _margin_of_error_sd); + + size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor; + size_t penalties = capacity / 100 * _gc_time_penalties; + size_t allocation_headroom = available; + allocation_headroom -= MIN2(allocation_headroom, spike_headroom); + allocation_headroom -= MIN2(allocation_headroom, penalties); + log_info(gc, ergo)("Free headroom: " PROPERFMT " (free) - " PROPERFMT "(spike) - " PROPERFMT " (penalties) = " PROPERFMT, + PROPERFMTARGS(available), + PROPERFMTARGS(spike_headroom), + PROPERFMTARGS(penalties), + PROPERFMTARGS(allocation_headroom)); accept_trigger_with_type(RATE); return true; } - bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); - if (is_spiking && avg_cycle_time > allocation_headroom / rate) { - log_trigger("Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (%zu%s) (spike threshold = %.2f)", - avg_cycle_time * 1000, - byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), - _spike_threshold_sd); + is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); + spike_time_to_deplete_available = (rate == 0)? 0: allocatable_bytes / rate; + if (is_spiking && (rate != 0) && (future_planned_gc_time > spike_time_to_deplete_available)) { + log_trigger("%s GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s)" + " to deplete free headroom (%zu%s) (spike threshold = %.2f)", + future_planned_gc_time_is_average? 
"Average": "Linear prediction of", future_planned_gc_time * 1000, + byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), + byte_size_in_proper_unit(allocatable_bytes), proper_unit_for_byte_size(allocatable_bytes), + _spike_threshold_sd); accept_trigger_with_type(SPIKE); return true; } - - if (ShenandoahHeuristics::should_start_gc()) { - _start_gc_is_pending = true; - return true; - } else { - return false; - } + return ShenandoahHeuristics::should_start_gc(); } void ShenandoahAdaptiveHeuristics::adjust_last_trigger_parameters(double amount) { @@ -352,6 +719,112 @@ size_t ShenandoahAdaptiveHeuristics::min_free_threshold() { return ShenandoahHeap::heap()->soft_max_capacity() / 100 * ShenandoahMinFreeThreshold; } +// This is called each time a new rate sample has been gathered, as governed by ShenandoahAccelerationSamplePeriod. +// Unlike traditional calculation of average allocation rate, there is no adjustment for standard deviation of the +// accelerated rate prediction. +size_t ShenandoahAdaptiveHeuristics::accelerated_consumption(double& acceleration, double& current_rate, + double avg_alloc_rate_words_per_second, + double predicted_cycle_time) const +{ + double *x_array = (double *) alloca(ShenandoahRateAccelerationSampleSize * sizeof(double)); + double *y_array = (double *) alloca(ShenandoahRateAccelerationSampleSize * sizeof(double)); + double x_sum = 0.0; + double y_sum = 0.0; + + assert(_spike_acceleration_num_samples > 0, "At minimum, we should have sample from this period"); + + double weighted_average_alloc; + if (_spike_acceleration_num_samples >= ShenandoahRateAccelerationSampleSize) { + double weighted_y_sum = 0; + double total_weight = 0; + double previous_x = 0; + uint delta = _spike_acceleration_num_samples - ShenandoahRateAccelerationSampleSize; + for (uint i = 0; i < ShenandoahRateAccelerationSampleSize; i++) { + uint index = (_spike_acceleration_first_sample_index + delta + i) % _spike_acceleration_buffer_size; + x_array[i] = 
_spike_acceleration_rate_timestamps[index]; + x_sum += x_array[i]; + y_array[i] = _spike_acceleration_rate_samples[index]; + if (i > 0) { + // first sample not included in weighted average because it has no weight. + double sample_weight = x_array[i] - x_array[i-1]; + weighted_y_sum += y_array[i] * sample_weight; + total_weight += sample_weight; + } + y_sum += y_array[i]; + } + weighted_average_alloc = (total_weight > 0)? weighted_y_sum / total_weight: 0; + } else { + weighted_average_alloc = 0; + } + + double momentary_rate; + if (_spike_acceleration_num_samples > ShenandoahMomentaryAllocationRateSpikeSampleSize) { + // Num samples must be strictly greater than sample size, because we need one extra sample to compute rate and weights + // In this context, the weight of a y value (an allocation rate) is the duration for which this allocation rate was + // active (the time since previous y value was reported). An allocation rate measured over a span of 300 ms (e.g. during + // concurrent GC) has much more "weight" than an allocation rate measured over a span of 15 s. + double weighted_y_sum = 0; + double total_weight = 0; + double sum_for_average = 0.0; + uint delta = _spike_acceleration_num_samples - ShenandoahMomentaryAllocationRateSpikeSampleSize; + for (uint i = 0; i < ShenandoahMomentaryAllocationRateSpikeSampleSize; i++) { + uint sample_index = (_spike_acceleration_first_sample_index + delta + i) % _spike_acceleration_buffer_size; + uint preceding_index = (sample_index == 0)? 
_spike_acceleration_buffer_size - 1: sample_index - 1; + double sample_weight = (_spike_acceleration_rate_timestamps[sample_index] + - _spike_acceleration_rate_timestamps[preceding_index]); + weighted_y_sum += _spike_acceleration_rate_samples[sample_index] * sample_weight; + total_weight += sample_weight; + } + momentary_rate = weighted_y_sum / total_weight; + bool is_spiking = _allocation_rate.is_spiking(momentary_rate, _spike_threshold_sd); + if (!is_spiking) { + // Disable momentary spike trigger unless allocation rate delta from average exceeds sd + momentary_rate = 0.0; + } + } else { + momentary_rate = 0.0; + } + + // By default, use momentary_rate for current rate and zero acceleration. Overwrite iff best-fit line has positive slope. + current_rate = momentary_rate; + acceleration = 0.0; + if ((_spike_acceleration_num_samples >= ShenandoahRateAccelerationSampleSize) + && (weighted_average_alloc >= avg_alloc_rate_words_per_second)) { + // If the average rate across the acceleration samples is below the overall average, this sample is not eligible to + // represent acceleration of allocation rate. We may just be catching up with allocations after a lull. 
+ + double *xy_array = (double *) alloca(ShenandoahRateAccelerationSampleSize * sizeof(double)); + double *x2_array = (double *) alloca(ShenandoahRateAccelerationSampleSize * sizeof(double)); + double xy_sum = 0.0; + double x2_sum = 0.0; + for (uint i = 0; i < ShenandoahRateAccelerationSampleSize; i++) { + xy_array[i] = x_array[i] * y_array[i]; + xy_sum += xy_array[i]; + x2_array[i] = x_array[i] * x_array[i]; + x2_sum += x2_array[i]; + } + // Find the best-fit least-squares linear representation of rate vs time + double m; /* slope */ + double b; /* y-intercept */ + + m = ((ShenandoahRateAccelerationSampleSize * xy_sum - x_sum * y_sum) + / (ShenandoahRateAccelerationSampleSize * x2_sum - x_sum * x_sum)); + b = (y_sum - m * x_sum) / ShenandoahRateAccelerationSampleSize; + + if (m > 0) { + double proposed_current_rate = m * x_array[ShenandoahRateAccelerationSampleSize - 1] + b; + acceleration = m; + current_rate = proposed_current_rate; + } + // else, leave current_rate = momentary_rate, acceleration = 0 + } + // and here also, leave current_rate = momentary_rate, acceleration = 0 + + double time_delta = get_planned_sleep_interval() + predicted_cycle_time; + size_t words_to_be_consumed = (size_t) (current_rate * time_delta + 0.5 * acceleration * time_delta * time_delta); + return words_to_be_consumed; +} + ShenandoahAllocationRate::ShenandoahAllocationRate() : _last_sample_time(os::elapsedTime()), _last_sample_value(0), @@ -363,7 +836,7 @@ ShenandoahAllocationRate::ShenandoahAllocationRate() : double ShenandoahAllocationRate::force_sample(size_t allocated, size_t &unaccounted_bytes_allocated) { const double MinSampleTime = 0.002; // Do not sample if time since last update is less than 2 ms double now = os::elapsedTime(); - double time_since_last_update = now -_last_sample_time; + double time_since_last_update = now - _last_sample_time; if (time_since_last_update < MinSampleTime) { unaccounted_bytes_allocated = allocated - _last_sample_value; _last_sample_value = 0; 
@@ -412,8 +885,10 @@ bool ShenandoahAllocationRate::is_spiking(double rate, double threshold) const { double sd = _rate.sd(); if (sd > 0) { - // There is a small chance that that rate has already been sampled, but it - // seems not to matter in practice. + // There is a small chance that that rate has already been sampled, but it seems not to matter in practice. + // Note that z_score reports how close the rate is to the average. A value between -1 and 1 means we are within one + // standard deviation. A value between -3 and +3 means we are within 3 standard deviations. We only check for z_score + // greater than threshold because we are looking for an allocation spike which is greater than the mean. double z_score = (rate - _rate.avg()) / sd; if (z_score > threshold) { return true; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp index 9b7824a50d7..c761f2a82f3 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp @@ -27,7 +27,9 @@ #define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahRegulatorThread.hpp" #include "gc/shenandoah/shenandoahSharedVariables.hpp" #include "memory/allocation.hpp" #include "utilities/numberSeq.hpp" @@ -108,6 +110,26 @@ public: virtual ~ShenandoahAdaptiveHeuristics(); + virtual void initialize() override; + + virtual void post_initialize() override; + + virtual void adjust_penalty(intx step) override; + + // At the end of GC(N), we idle GC until necessary to start the next GC. Compute the threshold of memory that can be allocated + // before we need to start the next GC. 
+ void start_idle_span() override; + + // Having observed a new allocation rate sample, add this to the acceleration history so that we can determine if allocation + // rate is accelerating. + void add_rate_to_acceleration_history(double timestamp, double rate); + + // Compute and return the current allocation rate, the current rate of acceleration, and the amount of memory that we expect + // to consume if we start GC right now and gc takes predicted_cycle_time to complete. + size_t accelerated_consumption(double& acceleration, double& current_rate, + double avg_rate_words_per_sec, double predicted_cycle_time) const; + + void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, RegionData* data, size_t size, size_t actual_free) override; @@ -136,6 +158,8 @@ public: const static double LOWEST_EXPECTED_AVAILABLE_AT_END; const static double HIGHEST_EXPECTED_AVAILABLE_AT_END; + const static size_t GC_TIME_SAMPLE_SIZE; + friend class ShenandoahAllocationRate; // Used to record the last trigger that signaled to start a GC. @@ -150,9 +174,19 @@ public: void adjust_margin_of_error(double amount); void adjust_spike_threshold(double amount); + // Returns number of words that can be allocated before we need to trigger next GC, given available in bytes. + inline size_t allocatable(size_t available) const { + return (available > _headroom_adjustment)? (available - _headroom_adjustment) / HeapWordSize: 0; + } + protected: ShenandoahAllocationRate _allocation_rate; + // Invocations of should_start_gc() happen approximately once per ms. Queries of allocation rate only happen if a + // a certain amount of time has passed since the previous query. + size_t _allocated_at_previous_query; + double _time_of_previous_allocation_query; + // The margin of error expressed in standard deviations to add to our // average cycle time and allocation rate. 
As this value increases we // tend to overestimate the rate at which mutators will deplete the @@ -179,6 +213,48 @@ protected: // source of feedback to adjust trigger parameters. TruncatedSeq _available; + ShenandoahFreeSet* _free_set; + + // This represents the time at which the allocation rate was most recently sampled for the purpose of detecting acceleration. + double _previous_acceleration_sample_timestamp; + size_t _total_allocations_at_start_of_idle; + + // bytes of headroom at which we should trigger GC + size_t _headroom_adjustment; + + // Keep track of GC_TIME_SAMPLE_SIZE most recent concurrent GC cycle times + uint _gc_time_first_sample_index; + uint _gc_time_num_samples; + double* const _gc_time_timestamps; + double* const _gc_time_samples; + double* const _gc_time_xy; // timestamp * sample + double* const _gc_time_xx; // timestamp squared + double _gc_time_sum_of_timestamps; + double _gc_time_sum_of_samples; + double _gc_time_sum_of_xy; + double _gc_time_sum_of_xx; + + double _gc_time_m; // slope + double _gc_time_b; // y-intercept + double _gc_time_sd; // sd on deviance from prediction + + // In preparation for a span during which GC will be idle, compute the headroom adjustment that will be used to + // detect when GC needs to trigger. + void compute_headroom_adjustment() override; + + void add_gc_time(double timestamp_at_start, double duration); + void add_degenerated_gc_time(double timestamp_at_start, double duration); + double predict_gc_time(double timestamp_at_start); + + // Keep track of SPIKE_ACCELERATION_SAMPLE_SIZE most recent spike allocation rate measurements. Note that it is + // typical to experience a small spike following end of GC cycle, as mutator threads refresh their TLABs. But + // there is generally an abundance of memory at this time as well, so this will not generally trigger GC. 
+ uint _spike_acceleration_buffer_size; + uint _spike_acceleration_first_sample_index; + uint _spike_acceleration_num_samples; + double* const _spike_acceleration_rate_samples; // holds rates in words/second + double* const _spike_acceleration_rate_timestamps; + // A conservative minimum threshold of free space that we'll try to maintain when possible. // For example, we might trigger a concurrent gc if we are likely to drop below // this threshold, or we might consider this when dynamically resizing generations diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp index 029b917deab..594367e2972 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -25,7 +25,6 @@ #include "gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" -#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp" @@ -39,11 +38,6 @@ using idx_t = ShenandoahSimpleBitMap::idx_t; -typedef struct { - ShenandoahHeapRegion* _region; - size_t _live_data; -} AgedRegionData; - static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) { if (a._live_data < b._live_data) return -1; @@ -52,6 +46,12 @@ static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) { return 0; } +void ShenandoahGenerationalHeuristics::post_initialize() { + ShenandoahHeuristics::post_initialize(); + _free_set = ShenandoahHeap::heap()->free_set(); + compute_headroom_adjustment(); +} + inline void assert_no_in_place_promotions() { #ifdef ASSERT class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure { @@ -69,25 +69,28 @@ 
ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGen : ShenandoahAdaptiveHeuristics(generation), _generation(generation), _add_regions_to_old(0) { } -void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); +void ShenandoahGenerationalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* collection_set, + RegionData* data, size_t data_size, + size_t free) { + ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); _add_regions_to_old = 0; - // Seed the collection set with resource area-allocated - // preselected regions, which are removed when we exit this scope. - ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions()); - // Find the amount that will be promoted, regions that will be promoted in // place, and preselected older regions that will be promoted by evacuation. - compute_evacuation_budgets(heap); + ShenandoahInPlacePromotionPlanner in_place_promotions(heap); + compute_evacuation_budgets(in_place_promotions, heap); - // Choose the collection set, including the regions preselected above for promotion into the old generation. - filter_regions(collection_set); + // Call the subclasses to add regions into the collection set. + select_collection_set_regions(collection_set, data, data_size, free); // Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator. adjust_evacuation_budgets(heap, collection_set); + if (collection_set->has_old_regions()) { + heap->shenandoah_policy()->record_mixed_cycle(); + } + if (_generation->is_global()) { // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so // the remembered set scan can use that to avoid walking into garbage. 
When the next old mark begins, we will @@ -98,11 +101,20 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio // coalesce those regions. Only the old regions which are not part of the collection set at this point are // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations // after a global cycle for old regions that were not included in this collection set. - heap->old_generation()->prepare_for_mixed_collections_after_global_gc(); + heap->old_generation()->transition_old_generation_after_global_gc(); } + + ShenandoahTracer::report_promotion_info(collection_set, + in_place_promotions.humongous_region_stats().count, + in_place_promotions.humongous_region_stats().garbage, + in_place_promotions.humongous_region_stats().free, + in_place_promotions.regular_region_stats().count, + in_place_promotions.regular_region_stats().garbage, + in_place_promotions.regular_region_stats().free); } -void ShenandoahGenerationalHeuristics::compute_evacuation_budgets(ShenandoahHeap* const heap) { +void ShenandoahGenerationalHeuristics::compute_evacuation_budgets(ShenandoahInPlacePromotionPlanner& in_place_promotions, + ShenandoahHeap* const heap) { shenandoah_assert_generational(); ShenandoahOldGeneration* const old_generation = heap->old_generation(); @@ -200,7 +212,7 @@ void ShenandoahGenerationalHeuristics::compute_evacuation_budgets(ShenandoahHeap // If is_global(), we let garbage-first heuristic determine cset membership. Otherwise, we give priority // to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve). // This also identifies regions that will be promoted in place. These use the tenuring threshold. - const size_t consumed_by_advance_promotion = select_aged_regions(_generation->is_global()? 0: old_promo_reserve); + const size_t consumed_by_advance_promotion = select_aged_regions(in_place_promotions, _generation->is_global()? 
0: old_promo_reserve); assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted"); // The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed @@ -224,166 +236,48 @@ void ShenandoahGenerationalHeuristics::compute_evacuation_budgets(ShenandoahHeap // case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand. } -void ShenandoahGenerationalHeuristics::filter_regions(ShenandoahCollectionSet* collection_set) { - assert(collection_set->is_empty(), "Must be empty"); +void ShenandoahGenerationalHeuristics::add_tenured_regions_to_collection_set(const size_t old_promotion_reserve, + ShenandoahGenerationalHeap *const heap, + size_t candidates, AgedRegionData* sorted_regions) { + size_t old_consumed = 0; + if (candidates > 0) { + // Sort in increasing order according to live data bytes. Note that + // candidates represents the number of regions that qualify to be promoted + // by evacuation. + QuickSort::sort(sorted_regions, candidates, + compare_by_aged_live); - auto heap = ShenandoahGenerationalHeap::heap(); - size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - - - // Check all pinned regions have updated status before choosing the collection set. - heap->assert_pinned_region_status(_generation); - - // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away. - - size_t num_regions = heap->num_regions(); - - RegionData* candidates = _region_data; - - size_t cand_idx = 0; - size_t preselected_candidates = 0; - - size_t total_garbage = 0; - - size_t immediate_garbage = 0; - size_t immediate_regions = 0; - - size_t free = 0; - size_t free_regions = 0; - - // This counts number of humongous regions that we intend to promote in this cycle. - size_t humongous_regions_promoted = 0; - // This counts number of regular regions that will be promoted in place. 
- size_t regular_regions_promoted_in_place = 0; - // This counts bytes of memory used by regular regions to be promoted in place. - size_t regular_regions_promoted_usage = 0; - // This counts bytes of memory free in regular regions to be promoted in place. - size_t regular_regions_promoted_free = 0; - // This counts bytes of garbage memory in regular regions to be promoted in place. - size_t regular_regions_promoted_garbage = 0; - - for (size_t i = 0; i < num_regions; i++) { - ShenandoahHeapRegion* region = heap->get_region(i); - if (!_generation->contains(region)) { - continue; - } - size_t garbage = region->garbage(); - total_garbage += garbage; - if (region->is_empty()) { - free_regions++; - free += region_size_bytes; - } else if (region->is_regular()) { - if (!region->has_live()) { - // We can recycle it right away and put it in the free set. - immediate_regions++; - immediate_garbage += garbage; - region->make_trash_immediate(); - } else { - bool is_candidate; - // This is our candidate for later consideration. - if (collection_set->is_preselected(i)) { - assert(heap->is_tenurable(region), "Preselection filter"); - is_candidate = true; - preselected_candidates++; - // Set garbage value to maximum value to force this into the sorted collection set. - garbage = region_size_bytes; - } else if (region->is_young() && heap->is_tenurable(region)) { - // Note that for GLOBAL GC, region may be OLD, and OLD regions do not qualify for pre-selection - - // This region is old enough to be promoted but it was not preselected, either because its garbage is below - // old garbage threshold so it will be promoted in place, or because there is not sufficient room - // in old gen to hold the evacuated copies of this region's live data. In both cases, we choose not to - // place this region into the collection set. 
- if (region->get_top_before_promote() != nullptr) { - // Region was included for promotion-in-place - regular_regions_promoted_in_place++; - regular_regions_promoted_usage += region->used_before_promote(); - regular_regions_promoted_free += region->free(); - regular_regions_promoted_garbage += region->garbage(); - } - is_candidate = false; - } else { - is_candidate = true; - } - if (is_candidate) { - candidates[cand_idx].set_region_and_garbage(region, garbage); - cand_idx++; - } + size_t selected_regions = 0; + size_t selected_live = 0; + for (size_t i = 0; i < candidates; i++) { + ShenandoahHeapRegion *const region = sorted_regions[i]._region; + const size_t region_live_data = sorted_regions[i]._live_data; + const size_t promotion_need = (size_t)(region_live_data * ShenandoahPromoEvacWaste); + if (old_consumed + promotion_need > old_promotion_reserve) { + // We rejected the remaining promotable regions from the collection set + // because we have no room to hold their evacuees. We do not need to + // iterate the remaining regions to estimate the amount we expect to + // promote because we know it directly form the census we computed + // during the preceding mark phase. + break; } - } else if (region->is_humongous_start()) { - // Reclaim humongous regions here, and count them as the immediate garbage -#ifdef ASSERT - bool reg_live = region->has_live(); - bool bm_live = _generation->complete_marking_context()->is_marked(cast_to_oop(region->bottom())); - assert(reg_live == bm_live, - "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: %zu", - BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); -#endif - if (!region->has_live()) { - heap->trash_humongous_region_at(region); - // Count only the start. 
Continuations would be counted on "trash" path - immediate_regions++; - immediate_garbage += garbage; - } else { - if (region->is_young() && heap->is_tenurable(region)) { - oop obj = cast_to_oop(region->bottom()); - size_t humongous_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize); - humongous_regions_promoted += humongous_regions; - } - } - } else if (region->is_trash()) { - // Count in just trashed collection set, during coalesced CM-with-UR - immediate_regions++; - immediate_garbage += garbage; + old_consumed += promotion_need; + heap->collection_set()->add_region(region); + selected_regions++; + selected_live += region_live_data; } + log_debug(gc, ergo)( "Preselected %zu regions containing " PROPERFMT " live data," + " consuming: " PROPERFMT " of budgeted: " PROPERFMT, + selected_regions, PROPERFMTARGS(selected_live), + PROPERFMTARGS(old_consumed), PROPERFMTARGS(old_promotion_reserve)); } - heap->old_generation()->set_expected_humongous_region_promotions(humongous_regions_promoted); - heap->old_generation()->set_expected_regular_region_promotions(regular_regions_promoted_in_place); - log_info(gc, ergo)("Planning to promote in place %zu humongous regions and %zu" - " regular regions, spanning a total of %zu used bytes", - humongous_regions_promoted, regular_regions_promoted_in_place, - humongous_regions_promoted * ShenandoahHeapRegion::region_size_bytes() + - regular_regions_promoted_usage); - - // Step 2. Look back at garbage statistics, and decide if we want to collect anything, - // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. - - assert (immediate_garbage <= total_garbage, - "Cannot have more immediate garbage than total garbage: %zu%s vs %zu%s", - byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), - byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); - - size_t immediate_percent = (total_garbage == 0) ? 
0 : (immediate_garbage * 100 / total_garbage); - bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0); - - if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) { - // Call the subclasses to add young-gen regions into the collection set. - choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); - } - - if (collection_set->has_old_regions()) { - heap->shenandoah_policy()->record_mixed_cycle(); - } - - collection_set->summarize(total_garbage, immediate_garbage, immediate_regions); - - ShenandoahTracer::report_evacuation_info(collection_set, - free_regions, - humongous_regions_promoted, - regular_regions_promoted_in_place, - regular_regions_promoted_garbage, - regular_regions_promoted_free, - immediate_regions, - immediate_garbage); } -// Preselect for inclusion into the collection set all regions whose age is at or above tenure age and for which the -// garbage percentage exceeds a dynamically adjusted threshold (known as the old-garbage threshold percentage). We -// identify these regions by setting the appropriate entry of the collection set's preselected regions array to true. -// All entries are initialized to false before calling this function. +// Select for inclusion into the collection set all regions whose age is at or +// above tenure age and for which the +// garbage percentage exceeds a dynamically adjusted threshold (known as the old-garbage threshold percentage). // -// During the subsequent selection of the collection set, we give priority to these promotion set candidates. // Without this prioritization, we found that the aged regions tend to be ignored because they typically have // much less garbage and much more live data than the recently allocated "eden" regions. 
When aged regions are // repeatedly excluded from the collection set, the amount of live memory within the young generation tends to @@ -391,20 +285,17 @@ void ShenandoahGenerationalHeuristics::filter_regions(ShenandoahCollectionSet* c // CPU and wall-clock time. // // A second benefit of treating aged regions differently than other regions during collection set selection is -// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation -// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be +// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation +// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be // reserved in the young generation. -size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_promotion_reserve) { +size_t ShenandoahGenerationalHeuristics::select_aged_regions(ShenandoahInPlacePromotionPlanner& in_place_promotions, + const size_t old_promotion_reserve) { // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle. 
assert_no_in_place_promotions(); auto const heap = ShenandoahGenerationalHeap::heap(); - ShenandoahFreeSet* free_set = heap->free_set(); - bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions(); - ShenandoahMarkingContext* const ctx = heap->marking_context(); - size_t promo_potential = 0; size_t candidates = 0; // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require @@ -415,14 +306,21 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr ResourceMark rm; AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions); - ShenandoahInPlacePromotionPlanner in_place_promotions(heap); - for (idx_t i = 0; i < num_regions; i++) { ShenandoahHeapRegion* const r = heap->get_region(i); - if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) { - // skip over regions that aren't regular young with some live data + if (r->is_empty() || !r->has_live() || !r->is_young()) { + // skip over regions that aren't young with some live data continue; } + + if (!r->is_regular()) { + if (r->is_humongous_start() && heap->is_tenurable(r)) { + in_place_promotions.prepare(r); + } + // Nothing else to be done for humongous regions + continue; + } + if (heap->is_tenurable(r)) { if (in_place_promotions.is_eligible(r)) { // We prefer to promote this region in place because it has a small amount of garbage and a large usage. @@ -438,66 +336,28 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr sorted_regions[candidates]._live_data = r->get_live_data_bytes(); candidates++; } - } else { - // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold. - // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to - // old-gen. 
Regions excluded from promotion because their garbage content is too low (causing us to anticipate that - // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes - // place during a subsequent GC pass because more garbage is found within the region between now and then. This - // should not happen if we are properly adapting the tenure age. The theory behind adaptive tenuring threshold - // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous - // age. If not this, we expect the tenure age to demonstrate linear population decay for at least two population - // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age. - // - // In the case that certain regions which were anticipated to be promoted in place need to be promoted by - // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of - // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion - // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause - // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle. - if (heap->is_aging_cycle() && heap->age_census()->is_tenurable(r->age() + 1)) { - if (r->garbage() >= in_place_promotions.old_garbage_threshold()) { - promo_potential += r->get_live_data_bytes(); - } - } } - // Note that we keep going even if one region is excluded from selection. - // Subsequent regions may be selected if they have smaller live data. } - in_place_promotions.update_free_set(); + in_place_promotions.complete_planning(); - // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions - // that qualify to be promoted by evacuation. 
- size_t old_consumed = 0; - if (candidates > 0) { - size_t selected_regions = 0; - size_t selected_live = 0; - QuickSort::sort(sorted_regions, candidates, compare_by_aged_live); - for (size_t i = 0; i < candidates; i++) { - ShenandoahHeapRegion* const region = sorted_regions[i]._region; - const size_t region_live_data = sorted_regions[i]._live_data; - const size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste); - if (old_consumed + promotion_need <= old_promotion_reserve) { - old_consumed += promotion_need; - candidate_regions_for_promotion_by_copy[region->index()] = true; - selected_regions++; - selected_live += region_live_data; - } else { - // We rejected this promotable region from the collection set because we had no room to hold its copy. - // Add this region to promo potential for next GC. - promo_potential += region_live_data; - assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected"); - } - // We keep going even if one region is excluded from selection because we need to accumulate all eligible - // regions that are not preselected into promo_potential - } - log_debug(gc, ergo)("Preselected %zu regions containing " PROPERFMT " live data," - " consuming: " PROPERFMT " of budgeted: " PROPERFMT, - selected_regions, PROPERFMTARGS(selected_live), PROPERFMTARGS(old_consumed), PROPERFMTARGS(old_promotion_reserve)); - } + add_tenured_regions_to_collection_set(old_promotion_reserve, heap, candidates, sorted_regions); - log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential)); + const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); + const size_t tenurable_this_cycle = heap->age_census()->get_tenurable_bytes(tenuring_threshold); + const size_t tenurable_next_cycle = heap->age_census()->get_tenurable_bytes(tenuring_threshold - 1); + assert(tenurable_next_cycle >= tenurable_this_cycle, + "Tenurable next cycle (" PROPERFMT 
") should include tenurable this cycle (" PROPERFMT ")", + PROPERFMTARGS(tenurable_next_cycle), PROPERFMTARGS(tenurable_this_cycle)); + + const size_t max_promotions = tenurable_this_cycle * ShenandoahPromoEvacWaste; + const size_t old_consumed = MIN2(max_promotions, old_promotion_reserve); + + // Don't include the bytes we expect to promote in this cycle in the next cycle + const size_t promo_potential = (tenurable_next_cycle - tenurable_this_cycle) * ShenandoahPromoEvacWaste; heap->old_generation()->set_promotion_potential(promo_potential); + log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential)); + return old_consumed; } @@ -550,12 +410,16 @@ void ShenandoahGenerationalHeuristics::adjust_evacuation_budgets(ShenandoahHeap* size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions(); size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated)); - size_t total_young_available = young_generation->available_with_reserve() - _add_regions_to_old * region_size_bytes;; + // In top_off_collection_set(), we shrunk planned future reserve by _add_regions_to_old * region_size_bytes, but we + // didn't shrink available. The current reserve is not affected by the planned future reserve. Current available is + // larger than planned available by the planned adjustment amount. + size_t total_young_available = young_generation->available_with_reserve() - _add_regions_to_old * region_size_bytes; + assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)", young_evacuated_reserve_used, total_young_available); young_generation->set_evacuation_reserve(young_evacuated_reserve_used); - // We have not yet rebuilt the free set. Some of the memory that is thought to be avaiable within old may no + // We have not yet rebuilt the free set. 
Some of the memory that is thought to be available within old may no // longer be available if that memory had been free within regions that were selected for the collection set. // Make the necessary adjustments to old_available. size_t old_available = @@ -634,24 +498,3 @@ void ShenandoahGenerationalHeuristics::adjust_evacuation_budgets(ShenandoahHeap* old_generation->set_promoted_reserve(total_promotion_reserve); old_generation->reset_promoted_expended(); } - -size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_set(ShenandoahCollectionSet* cset, - const RegionData* data, - size_t size) const { - // cur_young_garbage represents the amount of memory to be reclaimed from young-gen. In the case that live objects - // are known to be promoted out of young-gen, we count this as cur_young_garbage because this memory is reclaimed - // from young-gen and becomes available to serve future young-gen allocation requests. - size_t cur_young_garbage = 0; - for (size_t idx = 0; idx < size; idx++) { - ShenandoahHeapRegion* r = data[idx].get_region(); - if (cset->is_preselected(r->index())) { - assert(ShenandoahGenerationalHeap::heap()->is_tenurable(r), "Preselected regions must have tenure age"); - // Entire region will be promoted, This region does not impact young-gen or old-gen evacuation reserve. - // This region has been pre-selected and its impact on promotion reserve is already accounted for. 
- cur_young_garbage += r->garbage(); - cset->add_region(r); - } - } - return cur_young_garbage; -} - diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp index 74d657feab7..8ea5cdb36c8 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp @@ -27,12 +27,18 @@ #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +#include "gc/shenandoah/shenandoahInPlacePromoter.hpp" class ShenandoahGeneration; class ShenandoahHeap; class ShenandoahCollectionSet; class RegionData; +typedef struct { + ShenandoahHeapRegion* _region; + size_t _live_data; +} AgedRegionData; + /* * This class serves as the base class for heuristics used to trigger and * choose the collection sets for young and global collections. It leans @@ -47,11 +53,16 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics { public: explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation); - void choose_collection_set(ShenandoahCollectionSet* collection_set) override; + void post_initialize() override; + + // Wraps budget computation, subclass region selection, budget adjustment, and tracing. + void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free) override; private: // Compute evacuation budgets prior to choosing collection set. 
- void compute_evacuation_budgets(ShenandoahHeap* const heap); + void compute_evacuation_budgets(ShenandoahInPlacePromotionPlanner& in_place_promotions, ShenandoahHeap* const heap); // Preselect for possible inclusion into the collection set exactly the most // garbage-dense regions, including those that satisfy criteria 1 & 2 below, @@ -68,24 +79,28 @@ private: // regions, which are marked in the preselected_regions() indicator // array of the heap's collection set, which should be initialized // to false. - size_t select_aged_regions(const size_t old_promotion_reserve); + size_t select_aged_regions(ShenandoahInPlacePromotionPlanner& in_place_promotions, const size_t old_promotion_reserve); - // Filter and sort remaining regions before adding to collection set. - void filter_regions(ShenandoahCollectionSet* collection_set); + // Select regions for inclusion in the collection set that are tenured, but do + // not hold enough live data to warrant promotion in place. + void add_tenured_regions_to_collection_set(size_t old_promotion_reserve, + ShenandoahGenerationalHeap *const heap, + size_t candidates, AgedRegionData* sorted_regions); - // Adjust evacuation budgets after choosing collection set. The argument regions_to_xfer + // Adjust evacuation budgets after choosing collection set. On entry, the instance variable _regions_to_xfer // represents regions to be transferred to old based on decisions made in top_off_collection_set() void adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set); protected: + // Subclasses override this to perform generation-specific region selection. 
+ virtual void select_collection_set_regions(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free) = 0; + ShenandoahGeneration* _generation; size_t _add_regions_to_old; - - size_t add_preselected_regions_to_collection_set(ShenandoahCollectionSet* cset, - const RegionData* data, - size_t size) const; }; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index dd2ad28aa4b..9452e8b28cb 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -31,21 +31,94 @@ #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "utilities/quickSort.hpp" +bool ShenandoahEvacuationBudget::try_reserve(size_t bytes) { + size_t new_consumption = _consumed + bytes; + if (new_consumption <= _reserve) { + return true; + } + // Try expanding from shared pool + size_t new_reserve = _reserve; + size_t new_committed = _shared->committed; + while ((new_consumption > new_reserve) && (new_committed < _shared->limit)) { + new_committed += _region_size_bytes; + new_reserve += _region_size_bytes; + } + if (new_consumption <= new_reserve) { + _reserve = new_reserve; + _shared->committed = new_committed; + return true; + } + return false; +} + +void ShenandoahEvacuationBudget::commit(size_t consumption, size_t live) { + _consumed += consumption; + _live_bytes += live; + _region_count++; +} + +ShenandoahGlobalRegionDisposition ShenandoahGlobalCSetBudget::try_add_region( + const ShenandoahGlobalRegionAttributes& region) { + + size_t region_garbage = region.garbage; + size_t new_garbage = _cur_garbage + region_garbage; + bool add_regardless = (region_garbage > _ignore_threshold) && (new_garbage < _min_garbage); + + if (!add_regardless && (region_garbage < _garbage_threshold)) { + return ShenandoahGlobalRegionDisposition::SKIP; + } + + size_t 
live_bytes = region.live_data_bytes; + + if (region.is_old) { + size_t evac_need = old_evac.anticipated_consumption(live_bytes); + size_t promo_loss = region.free_bytes; + + // Snapshot state for rollback — old branch does two reservations + size_t saved_committed = _shared.committed; + size_t saved_old_reserve = old_evac.reserve(); + size_t saved_promo_reserve = promo.reserve(); + + if (old_evac.try_reserve(evac_need) && promo.try_reserve(promo_loss)) { + old_evac.commit(evac_need, live_bytes); + promo.commit_raw(promo_loss); + _cur_garbage = new_garbage; + return ShenandoahGlobalRegionDisposition::ADD_OLD_EVAC; + } + _shared.committed = saved_committed; + old_evac.set_reserve(saved_old_reserve); + promo.set_reserve(saved_promo_reserve); + return ShenandoahGlobalRegionDisposition::SKIP; + } else if (region.is_tenurable) { + size_t promo_need = promo.anticipated_consumption(live_bytes); + if (promo.try_reserve(promo_need)) { + promo.commit(promo_need, live_bytes); + _cur_garbage = new_garbage; + return ShenandoahGlobalRegionDisposition::ADD_PROMO; + } + return ShenandoahGlobalRegionDisposition::SKIP; + } else { + size_t evac_need = young_evac.anticipated_consumption(live_bytes); + if (young_evac.try_reserve(evac_need)) { + young_evac.commit(evac_need, live_bytes); + _cur_garbage = new_garbage; + return ShenandoahGlobalRegionDisposition::ADD_YOUNG_EVAC; + } + return ShenandoahGlobalRegionDisposition::SKIP; + } +} + ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation) : ShenandoahGenerationalHeuristics(generation) { } - -void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, - RegionData* data, size_t size, - size_t actual_free) { - // Better select garbage-first regions +void ShenandoahGlobalHeuristics::select_collection_set_regions(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) { QuickSort::sort(data, size, compare_by_garbage); - - 
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */); + choose_global_collection_set(cset, data, size, actual_free, 0); } - void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollectionSet* cset, const ShenandoahHeuristics::RegionData* data, size_t size, size_t actual_free, @@ -80,8 +153,7 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti if (young_evac_reserve > unaffiliated_young_memory) { shared_reserve_regions += unaffiliated_young_regions; } else { - size_t delta_regions = young_evac_reserve / region_size_bytes; - shared_reserve_regions += delta_regions; + shared_reserve_regions += young_evac_reserve / region_size_bytes; } young_evac_reserve = 0; size_t total_old_reserve = old_evac_reserve + old_promo_reserve; @@ -90,24 +162,15 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti shared_reserve_regions += unaffiliated_old_regions; old_promo_reserve = total_old_reserve - unaffiliated_old_memory; } else { - size_t delta_regions = old_evac_reserve / region_size_bytes; - shared_reserve_regions += delta_regions; + shared_reserve_regions += old_evac_reserve / region_size_bytes; } old_evac_reserve = 0; assert(shared_reserve_regions <= (heap->young_generation()->free_unaffiliated_regions() + heap->old_generation()->free_unaffiliated_regions()), - "simple math"); - - size_t shared_reserves = shared_reserve_regions * region_size_bytes; - size_t committed_from_shared_reserves = 0; - - size_t promo_bytes = 0; - size_t old_evac_bytes = 0; - size_t young_evac_bytes = 0; - - size_t consumed_by_promo = 0; // promo_bytes * ShenandoahPromoEvacWaste - size_t consumed_by_old_evac = 0; // old_evac_bytes * ShenandoahOldEvacWaste - size_t consumed_by_young_evac = 0; // young_evac_bytes * ShenandoahEvacWaste + "Shared reserve regions (%zu) should not exceed total unaffiliated regions (young: %zu, old: %zu)", + shared_reserve_regions, + 
heap->young_generation()->free_unaffiliated_regions(), + heap->old_generation()->free_unaffiliated_regions()); // Of the memory reclaimed by GC, some of this will need to be reserved for the next GC collection. Use the current // young reserve as an approximation of the future Collector reserve requirement. Try to end with at least @@ -115,147 +178,93 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + original_young_evac_reserve; size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; - size_t aged_regions_promoted = 0; - size_t young_regions_evacuated = 0; - size_t old_regions_evacuated = 0; + ShenandoahGlobalCSetBudget budget(region_size_bytes, + shared_reserve_regions * region_size_bytes, + garbage_threshold, ignore_threshold, min_garbage, + young_evac_reserve, ShenandoahEvacWaste, + old_evac_reserve, ShenandoahOldEvacWaste, + old_promo_reserve, ShenandoahPromoEvacWaste); + budget.set_cur_garbage(cur_young_garbage); - log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Discretionary evacuation budget (for either old or young): %zu%s" - ", Actual Free: %zu%s.", - byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves), - byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); + log_info(gc, ergo)("Adaptive CSet Selection for global cycle. 
Discretionary evacuation budget (for either old or young): " PROPERFMT ", Actual Free: " PROPERFMT, + PROPERFMTARGS(budget.shared_reserves()), PROPERFMTARGS(actual_free)); - size_t cur_garbage = cur_young_garbage; for (size_t idx = 0; idx < size; idx++) { ShenandoahHeapRegion* r = data[idx].get_region(); - assert(!cset->is_preselected(r->index()), "There should be no preselected regions during GLOBAL GC"); - bool add_region = false; - size_t region_garbage = r->garbage(); - size_t new_garbage = cur_garbage + region_garbage; - bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); - size_t live_bytes = r->get_live_data_bytes(); - if (add_regardless || (region_garbage >= garbage_threshold)) { - if (r->is_old()) { - size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahOldEvacWaste); - size_t new_old_consumption = consumed_by_old_evac + anticipated_consumption; - size_t new_old_evac_reserve = old_evac_reserve; - size_t proposed_old_region_expansion = 0; - while ((new_old_consumption > new_old_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) { - committed_from_shared_reserves += region_size_bytes; - proposed_old_region_expansion++; - new_old_evac_reserve += region_size_bytes; - } - // If this region has free memory and we choose to place it in the collection set, its free memory is no longer - // available to hold promotion results. So we behave as if its free memory is consumed within the promotion reserve. 
- size_t anticipated_loss_from_promo_reserve = r->free(); - size_t new_promo_consumption = consumed_by_promo + anticipated_loss_from_promo_reserve; - size_t new_promo_reserve = old_promo_reserve; - while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) { - committed_from_shared_reserves += region_size_bytes; - proposed_old_region_expansion++; - new_promo_reserve += region_size_bytes; - } - if ((new_old_consumption <= new_old_evac_reserve) && (new_promo_consumption <= new_promo_reserve)) { - add_region = true; - old_evac_reserve = new_old_evac_reserve; - old_promo_reserve = new_promo_reserve; - old_evac_bytes += live_bytes; - consumed_by_old_evac = new_old_consumption; - consumed_by_promo = new_promo_consumption; - cur_garbage = new_garbage; - old_regions_evacuated++; - } else { - // We failed to sufficiently expand old so unwind proposed expansion - committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes; - } - } else if (heap->is_tenurable(r)) { - size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahPromoEvacWaste); - size_t new_promo_consumption = consumed_by_promo + anticipated_consumption; - size_t new_promo_reserve = old_promo_reserve; - size_t proposed_old_region_expansion = 0; - while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) { - committed_from_shared_reserves += region_size_bytes; - proposed_old_region_expansion++; - new_promo_reserve += region_size_bytes; - } - if (new_promo_consumption <= new_promo_reserve) { - add_region = true; - old_promo_reserve = new_promo_reserve; - promo_bytes += live_bytes; - consumed_by_promo = new_promo_consumption; - cur_garbage = new_garbage; - aged_regions_promoted++; - } else { - // We failed to sufficiently expand old so unwind proposed expansion - committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes; - } - } else { - assert(r->is_young() && 
!heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)"); - size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahEvacWaste); - size_t new_young_evac_consumption = consumed_by_young_evac + anticipated_consumption; - size_t new_young_evac_reserve = young_evac_reserve; - size_t proposed_young_region_expansion = 0; - while ((new_young_evac_consumption > new_young_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) { - committed_from_shared_reserves += region_size_bytes; - proposed_young_region_expansion++; - new_young_evac_reserve += region_size_bytes; - } - if (new_young_evac_consumption <= new_young_evac_reserve) { - add_region = true; - young_evac_reserve = new_young_evac_reserve; - young_evac_bytes += live_bytes; - consumed_by_young_evac = new_young_evac_consumption; - cur_garbage = new_garbage; - young_regions_evacuated++; - } else { - // We failed to sufficiently expand old so unwind proposed expansion - committed_from_shared_reserves -= proposed_young_region_expansion * region_size_bytes; - } - } + if (cset->is_in(r) || r->get_top_before_promote() != nullptr) { + assert(heap->is_tenurable(r), "Region %zu already selected for promotion must be tenurable", idx); + continue; } - if (add_region) { + + ShenandoahGlobalRegionAttributes attrs; + attrs.garbage = r->garbage(); + attrs.live_data_bytes = r->get_live_data_bytes(); + attrs.free_bytes = r->free(); + attrs.is_old = r->is_old(); + attrs.is_tenurable = !r->is_old() && heap->is_tenurable(r); + + if (budget.try_add_region(attrs) != ShenandoahGlobalRegionDisposition::SKIP) { cset->add_region(r); } } - if (committed_from_shared_reserves < shared_reserves) { - // Give all the rest to promotion - old_promo_reserve += (shared_reserves - committed_from_shared_reserves); - // dead code: committed_from_shared_reserves = shared_reserves; - } + budget.finish(); - // Consider the effects of round-off: - // 1. 
We know that the sum over each evacuation mutiplied by Evacuation Waste is <= total evacuation reserve - // 2. However, the reserve for each individual evacuation may be rounded down. In the worst case, we will be over budget - // by the number of regions evacuated, since each region's reserve might be under-estimated by at most 1 - // 3. Likewise, if we take the sum of bytes evacuated and multiply this by the Evacuation Waste and then round down - // to nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1. - // 4. This explains the adjustments to subtotals in the assert statements below. - assert(young_evac_bytes * ShenandoahEvacWaste <= young_evac_reserve + young_regions_evacuated, - "budget: %zu <= %zu", (size_t) (young_evac_bytes * ShenandoahEvacWaste), young_evac_reserve); - assert(old_evac_bytes * ShenandoahOldEvacWaste <= old_evac_reserve + old_regions_evacuated, - "budget: %zu <= %zu", (size_t) (old_evac_bytes * ShenandoahOldEvacWaste), old_evac_reserve); - assert(promo_bytes * ShenandoahPromoEvacWaste <= old_promo_reserve + aged_regions_promoted, - "budget: %zu <= %zu", (size_t) (promo_bytes * ShenandoahPromoEvacWaste), old_promo_reserve); - assert(young_evac_reserve + old_evac_reserve + old_promo_reserve <= - heap->young_generation()->get_evacuation_reserve() + heap->old_generation()->get_evacuation_reserve() + - heap->old_generation()->get_promoted_reserve(), "Exceeded budget"); + DEBUG_ONLY(budget.assert_budget_constraints_hold( + heap->young_generation()->get_evacuation_reserve() + + heap->old_generation()->get_evacuation_reserve() + + heap->old_generation()->get_promoted_reserve())); - if (heap->young_generation()->get_evacuation_reserve() < young_evac_reserve) { - size_t delta_bytes = young_evac_reserve - heap->young_generation()->get_evacuation_reserve(); + if (heap->young_generation()->get_evacuation_reserve() < budget.young_evac.reserve()) { + size_t delta_bytes = budget.young_evac.reserve() - 
heap->young_generation()->get_evacuation_reserve(); size_t delta_regions = delta_bytes / region_size_bytes; size_t regions_to_transfer = MIN2(unaffiliated_old_regions, delta_regions); log_info(gc)("Global GC moves %zu unaffiliated regions from old collector to young collector reserves", regions_to_transfer); ssize_t negated_regions = -regions_to_transfer; heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(negated_regions); - } else if (heap->young_generation()->get_evacuation_reserve() > young_evac_reserve) { - size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - young_evac_reserve; + } else if (heap->young_generation()->get_evacuation_reserve() > budget.young_evac.reserve()) { + size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - budget.young_evac.reserve(); size_t delta_regions = delta_bytes / region_size_bytes; size_t regions_to_transfer = MIN2(unaffiliated_young_regions, delta_regions); log_info(gc)("Global GC moves %zu unaffiliated regions from young collector to old collector reserves", regions_to_transfer); heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(regions_to_transfer); } - heap->young_generation()->set_evacuation_reserve(young_evac_reserve); - heap->old_generation()->set_evacuation_reserve(old_evac_reserve); - heap->old_generation()->set_promoted_reserve(old_promo_reserve); + heap->young_generation()->set_evacuation_reserve(budget.young_evac.reserve()); + heap->old_generation()->set_evacuation_reserve(budget.old_evac.reserve()); + heap->old_generation()->set_promoted_reserve(budget.promo.reserve()); } + +#ifdef ASSERT +void ShenandoahGlobalCSetBudget::assert_budget_constraints_hold(size_t original_total_reserves) const { + // Consider the effects of round-off: + // 1. We know that the sum over each evacuation multiplied by Evacuation Waste is <= total evacuation reserve + // 2. However, the reserve for each individual evacuation may be rounded down. 
In the worst case, we will be + // over budget by the number of regions evacuated, since each region's reserve might be under-estimated by + // at most 1. + // 3. Likewise, if we take the sum of bytes evacuated and multiply this by the Evacuation Waste and then round + // down to nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1. + // 4. This explains the adjustments to subtotals in the assert statements below. + assert(young_evac.live_bytes() * young_evac.waste_factor() <= + young_evac.reserve() + young_evac.region_count(), + "Young evac consumption (%zu) exceeds reserve (%zu) + region count (%zu)", + (size_t)(young_evac.live_bytes() * young_evac.waste_factor()), + young_evac.reserve(), young_evac.region_count()); + assert(old_evac.live_bytes() * old_evac.waste_factor() <= + old_evac.reserve() + old_evac.region_count(), + "Old evac consumption (%zu) exceeds reserve (%zu) + region count (%zu)", + (size_t)(old_evac.live_bytes() * old_evac.waste_factor()), + old_evac.reserve(), old_evac.region_count()); + assert(promo.live_bytes() * promo.waste_factor() <= + promo.reserve() + promo.region_count(), + "Promo consumption (%zu) exceeds reserve (%zu) + region count (%zu)", + (size_t)(promo.live_bytes() * promo.waste_factor()), + promo.reserve(), promo.region_count()); + + size_t total_post_reserves = young_evac.reserve() + old_evac.reserve() + promo.reserve(); + assert(total_post_reserves <= original_total_reserves, + "Total post-cset reserves (%zu + %zu + %zu = %zu) exceed original reserves (%zu)", + young_evac.reserve(), old_evac.reserve(), promo.reserve(), + total_post_reserves, original_total_reserves); +} +#endif diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp index 1f95f75c521..1e96a665704 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp +++ 
b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp @@ -25,23 +25,145 @@ #ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGLOBALHEURISTICS_HPP #define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGLOBALHEURISTICS_HPP - #include "gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp" class ShenandoahGlobalGeneration; -/* - * This is a specialization of the generational heuristics which is aware - * of old and young regions and respects the configured evacuation parameters - * for such regions during a global collection of a generational heap. - */ +enum class ShenandoahGlobalRegionDisposition { + SKIP, + ADD_OLD_EVAC, + ADD_PROMO, + ADD_YOUNG_EVAC +}; + +// A shared pool of evacuation reserves that can be drawn from by any +// evacuation category. Owned by ShenandoahGlobalCSetBudget; each +// ShenandoahEvacuationBudget holds a pointer to it. +struct ShenandoahSharedEvacReserve { + size_t limit; + size_t committed; + + ShenandoahSharedEvacReserve(size_t limit) : limit(limit), committed(0) {} +}; + +// Tracks the budget for a single evacuation category. +class ShenandoahEvacuationBudget { + size_t _reserve; + size_t _consumed; + size_t _live_bytes; + size_t _region_count; + size_t _region_size_bytes; + double _waste_factor; + ShenandoahSharedEvacReserve* _shared; + +public: + ShenandoahEvacuationBudget(size_t reserve, double waste_factor, + size_t region_size_bytes, + ShenandoahSharedEvacReserve* shared) + : _reserve(reserve), _consumed(0), _live_bytes(0), + _region_count(0), _region_size_bytes(region_size_bytes), + _waste_factor(waste_factor), _shared(shared) {} + + size_t anticipated_consumption(size_t live_bytes) const { + return (size_t)(live_bytes * _waste_factor); + } + + // Try to reserve 'bytes' from this budget, expanding from the shared + // pool if necessary. On success, updates _reserve and shared->committed + // and returns true. On failure, nothing is modified. 
+ bool try_reserve(size_t bytes); + + // Record that a region was accepted. + void commit(size_t consumption, size_t live_bytes); + + // Record a raw consumption (e.g. free bytes lost from promo reserve). + void commit_raw(size_t bytes) { _consumed += bytes; } + + size_t reserve() const { return _reserve; } + size_t consumed() const { return _consumed; } + size_t live_bytes() const { return _live_bytes; } + size_t region_count() const { return _region_count; } + double waste_factor() const { return _waste_factor; } + + void add_to_reserve(size_t bytes) { _reserve += bytes; } + void set_reserve(size_t bytes) { _reserve = bytes; } +}; + +// These are the attributes of a region required to decide if it can be +// added to the collection set or not. +struct ShenandoahGlobalRegionAttributes { + size_t garbage; + size_t live_data_bytes; + size_t free_bytes; + bool is_old; + bool is_tenurable; +}; + +// This class consolidates all of the data required to build a global +// collection set. Critically, it takes no dependencies on any classes +// that themselves depend on ShenandoahHeap. This makes it possible to +// write extensive unit tests for this complex code. 
+class ShenandoahGlobalCSetBudget { + size_t _region_size_bytes; + size_t _garbage_threshold; + size_t _ignore_threshold; + size_t _min_garbage; + size_t _cur_garbage; + + ShenandoahSharedEvacReserve _shared; + +public: + ShenandoahEvacuationBudget young_evac; + ShenandoahEvacuationBudget old_evac; + ShenandoahEvacuationBudget promo; + + ShenandoahGlobalCSetBudget(size_t region_size_bytes, + size_t shared_reserves, + size_t garbage_threshold, + size_t ignore_threshold, + size_t min_garbage, + size_t young_evac_reserve, double young_waste, + size_t old_evac_reserve, double old_waste, + size_t promo_reserve, double promo_waste) + : _region_size_bytes(region_size_bytes), + _garbage_threshold(garbage_threshold), + _ignore_threshold(ignore_threshold), + _min_garbage(min_garbage), + _cur_garbage(0), + _shared(shared_reserves), + young_evac(young_evac_reserve, young_waste, region_size_bytes, &_shared), + old_evac(old_evac_reserve, old_waste, region_size_bytes, &_shared), + promo(promo_reserve, promo_waste, region_size_bytes, &_shared) {} + + ShenandoahGlobalRegionDisposition try_add_region(const ShenandoahGlobalRegionAttributes& region); + + // Any remaining shared budget is given to the promotion reserve. + void finish() { + if (_shared.committed < _shared.limit) { + promo.add_to_reserve(_shared.limit - _shared.committed); + } + } + + // Verify that the budget invariants hold after collection set selection. + // original_total_reserves is the sum of the young, old, and promo evacuation + // reserves as they were before the budget was constructed. 
+ DEBUG_ONLY(void assert_budget_constraints_hold(size_t original_total_reserves) const;) + + size_t region_size_bytes() const { return _region_size_bytes; } + size_t shared_reserves() const { return _shared.limit; } + size_t committed_from_shared() const { return _shared.committed; } + size_t cur_garbage() const { return _cur_garbage; } + + void set_cur_garbage(size_t g) { _cur_garbage = g; } +}; + class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics { public: ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation); - void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, - RegionData* data, size_t size, - size_t actual_free) override; + void select_collection_set_regions(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) override; private: void choose_global_collection_set(ShenandoahCollectionSet* cset, @@ -50,5 +172,4 @@ private: size_t cur_young_garbage) const; }; - #endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGLOBALHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp index 8fc744112bf..3091b19b600 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp @@ -29,6 +29,7 @@ #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahTrace.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" #include "runtime/globals_extension.hpp" @@ -46,13 +47,16 @@ int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) { } ShenandoahHeuristics::ShenandoahHeuristics(ShenandoahSpaceInfo* space_info) : + _most_recent_trigger_evaluation_time(os::elapsedTime()), + _most_recent_planned_sleep_interval(0.0), 
_start_gc_is_pending(false), _declined_trigger_count(0), _most_recent_declined_trigger_count(0), _space_info(space_info), _region_data(nullptr), _guaranteed_gc_interval(0), - _cycle_start(os::elapsedTime()), + _precursor_cycle_start(os::elapsedTime()), + _cycle_start(_precursor_cycle_start), _last_cycle_end(0), _gc_times_learned(0), _gc_time_penalties(0), @@ -76,10 +80,6 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec ShenandoahHeap* heap = ShenandoahHeap::heap(); assert(collection_set->is_empty(), "Must be empty"); - assert(!heap->mode()->is_generational(), "Wrong heuristic for heap mode"); - - // Check all pinned regions have updated status before choosing the collection set. - heap->assert_pinned_region_status(); // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away. @@ -100,6 +100,10 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec for (size_t i = 0; i < num_regions; i++) { ShenandoahHeapRegion* region = heap->get_region(i); + if (!_space_info->contains(region)) { + continue; + } + size_t garbage = region->garbage(); total_garbage += garbage; @@ -114,18 +118,14 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec region->make_trash_immediate(); } else { // This is our candidate for later consideration. + assert(region->get_top_before_promote() == nullptr, + "Cannot add region %zu scheduled for in-place-promotion to the collection set", i); candidates[cand_idx].set_region_and_garbage(region, garbage); cand_idx++; } } else if (region->is_humongous_start()) { // Reclaim humongous regions here, and count them as the immediate garbage -#ifdef ASSERT - bool reg_live = region->has_live(); - bool bm_live = heap->global_generation()->complete_marking_context()->is_marked(cast_to_oop(region->bottom())); - assert(reg_live == bm_live, - "Humongous liveness and marks should agree. 
Region live: %s; Bitmap live: %s; Region Live Words: %zu", - BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); -#endif + DEBUG_ONLY(assert_humongous_mark_consistency(region)); if (!region->has_live()) { heap->trash_humongous_region_at(region); @@ -134,7 +134,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec immediate_garbage += garbage; } } else if (region->is_trash()) { - // Count in just trashed collection set, during coalesced CM-with-UR + // Count in just trashed humongous continuation regions immediate_regions++; immediate_garbage += garbage; } @@ -142,18 +142,30 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec // Step 2. Look back at garbage statistics, and decide if we want to collect anything, // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. + assert(immediate_garbage <= total_garbage, + "Cannot have more immediate garbage than total garbage: " PROPERFMT " vs " PROPERFMT, + PROPERFMTARGS(immediate_garbage), PROPERFMTARGS(total_garbage)); - assert (immediate_garbage <= total_garbage, - "Cannot have more immediate garbage than total garbage: %zu%s vs %zu%s", - byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), - byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); - - size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage); + const size_t immediate_percent = (total_garbage == 0) ? 
0 : (immediate_garbage * 100 / total_garbage); if (immediate_percent <= ShenandoahImmediateThreshold) { choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); } collection_set->summarize(total_garbage, immediate_garbage, immediate_regions); + ShenandoahTracer::report_evacuation_info(collection_set, free_regions, immediate_regions, immediate_garbage); +} + +void ShenandoahHeuristics::start_idle_span() { + // do nothing +} + +void ShenandoahHeuristics::record_degenerated_cycle_start(bool out_of_cycle) { + if (out_of_cycle) { + _precursor_cycle_start = _cycle_start = os::elapsedTime(); + } else { + _precursor_cycle_start = _cycle_start; + _cycle_start = os::elapsedTime(); + } } void ShenandoahHeuristics::record_cycle_start() { @@ -197,7 +209,6 @@ bool ShenandoahHeuristics::should_degenerate_cycle() { void ShenandoahHeuristics::adjust_penalty(intx step) { assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100, "In range before adjustment: %zd", _gc_time_penalties); - if ((_most_recent_declined_trigger_count <= Penalty_Free_Declinations) && (step > 0)) { // Don't penalize if heuristics are not responsible for a negative outcome. Allow Penalty_Free_Declinations following // previous GC for self calibration without penalty. @@ -274,6 +285,30 @@ void ShenandoahHeuristics::initialize() { // Nothing to do by default. } +void ShenandoahHeuristics::post_initialize() { + // Nothing to do by default. +} + double ShenandoahHeuristics::elapsed_cycle_time() const { return os::elapsedTime() - _cycle_start; } + + +// Includes the time spent in abandoned concurrent GC cycle that may have triggered this degenerated cycle. 
+double ShenandoahHeuristics::elapsed_degenerated_cycle_time() const { + double now = os::elapsedTime(); + return now - _precursor_cycle_start; +} + +#ifdef ASSERT +void ShenandoahHeuristics::assert_humongous_mark_consistency(ShenandoahHeapRegion* region) { + assert(region->is_humongous(), "Region %zu must be humongous", region->index()); + const oop humongous_oop = cast_to_oop(region->bottom()); + ShenandoahGeneration* generation = ShenandoahHeap::heap()->generation_for(region->affiliation()); + const bool bm_live = generation->complete_marking_context()->is_marked(humongous_oop); + const bool reg_live = region->has_live(); + assert(reg_live == bm_live, + "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: %zu", + BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); +} +#endif diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp index 633c4e87126..9066cdfccac 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp @@ -78,6 +78,10 @@ class ShenandoahHeuristics : public CHeapObj { }; #endif +private: + double _most_recent_trigger_evaluation_time; + double _most_recent_planned_sleep_interval; + protected: static const uint Moving_Average_Samples = 10; // Number of samples to store in moving averages @@ -85,14 +89,13 @@ protected: size_t _declined_trigger_count; // This counts how many times since previous GC finished that this // heuristic has answered false to should_start_gc(). size_t _most_recent_declined_trigger_count; - ; // This represents the value of _declined_trigger_count as captured at the + // This represents the value of _declined_trigger_count as captured at the // moment the most recent GC effort was triggered. 
In case the most recent // concurrent GC effort degenerates, the value of this variable allows us to // differentiate between degeneration because heuristic was overly optimistic // in delaying the trigger vs. degeneration for other reasons (such as the // most recent GC triggered "immediately" after previous GC finished, but the // free headroom has already been depleted). - class RegionData { private: ShenandoahHeapRegion* _region; @@ -103,6 +106,7 @@ protected: #ifdef ASSERT UnionTag _union_tag; #endif + public: inline void clear() { @@ -171,6 +175,7 @@ protected: size_t _guaranteed_gc_interval; + double _precursor_cycle_start; double _cycle_start; double _last_cycle_end; @@ -188,7 +193,7 @@ protected: RegionData* data, size_t data_size, size_t free) = 0; - void adjust_penalty(intx step); + virtual void adjust_penalty(intx step); inline void accept_trigger() { _most_recent_declined_trigger_count = _declined_trigger_count; @@ -200,6 +205,14 @@ protected: _declined_trigger_count++; } + inline double get_most_recent_wake_time() const { + return _most_recent_trigger_evaluation_time; + } + + inline double get_planned_sleep_interval() const { + return _most_recent_planned_sleep_interval; + } + public: ShenandoahHeuristics(ShenandoahSpaceInfo* space_info); virtual ~ShenandoahHeuristics(); @@ -212,10 +225,22 @@ public: _guaranteed_gc_interval = guaranteed_gc_interval; } + virtual void start_idle_span(); + virtual void compute_headroom_adjustment() { + // Default implementation does nothing. 
+ } + virtual void record_cycle_start(); + void record_degenerated_cycle_start(bool out_of_cycle); + virtual void record_cycle_end(); + void update_should_start_query_times(double now, double planned_sleep_interval) { + _most_recent_trigger_evaluation_time = now; + _most_recent_planned_sleep_interval = planned_sleep_interval; + } + virtual bool should_start_gc(); inline void cancel_trigger_request() { @@ -248,8 +273,10 @@ public: virtual bool is_diagnostic() = 0; virtual bool is_experimental() = 0; virtual void initialize(); + virtual void post_initialize(); double elapsed_cycle_time() const; + double elapsed_degenerated_cycle_time() const; virtual size_t force_alloc_rate_sample(size_t bytes_allocated) { // do nothing @@ -258,6 +285,8 @@ public: // Format prefix and emit log message indicating a GC cycle hs been triggered void log_trigger(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3); + + DEBUG_ONLY(static void assert_humongous_mark_consistency(ShenandoahHeapRegion* region)); }; #endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index e0cab781674..0789fd5cb1c 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -79,7 +79,6 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera } bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) { - _mixed_evac_cset = collection_set; _included_old_regions = 0; _evacuated_old_bytes = 0; _collected_old_bytes = 0; @@ -106,10 +105,6 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll _first_pinned_candidate = NOT_FOUND; - uint included_old_regions = 0; - size_t evacuated_old_bytes = 0; - size_t collected_old_bytes = 0; - // If a region is put into the 
collection set, then this region's free (not yet used) bytes are no longer // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount @@ -152,7 +147,7 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " PROPERFMT ", candidates: %u", PROPERFMTARGS(_old_evacuation_budget), unprocessed_old_collection_candidates()); - return add_old_regions_to_cset(); + return add_old_regions_to_cset(collection_set); } bool ShenandoahOldHeuristics::all_candidates_are_pinned() { @@ -226,7 +221,7 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() { _next_old_collection_candidate = write_index + 1; } -bool ShenandoahOldHeuristics::add_old_regions_to_cset() { +bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* collection_set) { if (unprocessed_old_collection_candidates() == 0) { return false; } @@ -310,7 +305,7 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset() { break; } } - _mixed_evac_cset->add_region(r); + collection_set->add_region(r); _included_old_regions++; _evacuated_old_bytes += live_data_for_evacuation; _collected_old_bytes += r->garbage(); @@ -356,7 +351,7 @@ bool ShenandoahOldHeuristics::finalize_mixed_evacs() { return (_included_old_regions > 0); } -bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) { +bool ShenandoahOldHeuristics::top_off_collection_set(ShenandoahCollectionSet* collection_set, size_t &add_regions_to_old) { if (unprocessed_old_collection_candidates() == 0) { add_regions_to_old = 0; return false; @@ -367,15 +362,13 @@ bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) // We have budgeted to assure the live_bytes_in_tenurable_regions() get evacuated into old generation. 
Young reserves // only for untenurable region evacuations. - size_t planned_young_evac = _mixed_evac_cset->get_live_bytes_in_untenurable_regions(); + size_t planned_young_evac = collection_set->get_live_bytes_in_untenurable_regions(); size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste); size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - size_t regions_required_for_collector_reserve = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes; assert(consumed_from_young_cset <= max_young_cset, "sanity"); assert(max_young_cset <= young_unaffiliated_regions * region_size_bytes, "sanity"); - size_t regions_for_old_expansion; if (consumed_from_young_cset < max_young_cset) { size_t excess_young_reserves = max_young_cset - consumed_from_young_cset; @@ -398,8 +391,11 @@ bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) _unspent_unfragmented_old_budget += supplement_without_waste; _old_generation->augment_evacuation_reserve(budget_supplement); young_generation->set_evacuation_reserve(max_young_cset - budget_supplement); - - return add_old_regions_to_cset(); + assert(young_generation->get_evacuation_reserve() >= + collection_set->get_live_bytes_in_untenurable_regions() * ShenandoahEvacWaste, + "adjusted evac reserve (%zu) must be large enough for planned evacuation (%zu)", + young_generation->get_evacuation_reserve(), collection_set->get_live_bytes_in_untenurable_regions()); + return add_old_regions_to_cset(collection_set); } else { add_regions_to_old = 0; return false; @@ -580,7 +576,7 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { } else if (has_coalesce_and_fill_candidates()) { _old_generation->transition_to(ShenandoahOldGeneration::FILLING); } else { - _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + _old_generation->transition_to(ShenandoahOldGeneration::IDLE); } } diff --git 
a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp index e657ac58ae4..04a92d28248 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -106,7 +106,6 @@ private: // when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code // calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by // finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin. - ShenandoahCollectionSet* _mixed_evac_cset; size_t _evacuated_old_bytes; size_t _collected_old_bytes; size_t _included_old_regions; @@ -163,7 +162,7 @@ private: // a conservative old evacuation budget, and the second time with a larger more aggressive old evacuation budget. Returns // true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize // mixed evacuations.) - bool add_old_regions_to_cset(); + bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set); public: explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap); @@ -180,7 +179,7 @@ public: // evacuation candidate regions into the collection set as will fit within this excess repurposed reserved. // Returns true iff we need to finalize mixed evacs. Upon return, the var parameter regions_to_xfer holds the // number of regions to transfer from young to old. 
- bool top_off_collection_set(size_t &add_regions_to_old); + bool top_off_collection_set(ShenandoahCollectionSet* collection_set, size_t &add_regions_to_old); // Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count // of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp index 6ed05abf0b1..765061a43ed 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp @@ -27,6 +27,8 @@ #include "utilities/globalDefinitions.hpp" +class ShenandoahHeapRegion; + /* * The purpose of this interface is to decouple the heuristics from a * direct dependency on the ShenandoahHeap singleton instance. This is @@ -46,6 +48,9 @@ public: // in time within each GC cycle. For certain GC cycles, the value returned may include some bytes allocated before // the start of the current GC cycle. virtual size_t bytes_allocated_since_gc_start() const = 0; + + // Return true if this region belongs to this space. 
+ virtual bool contains(ShenandoahHeapRegion* region) const = 0; }; #endif //SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSPACEINFO_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp index beff2200d90..27aa9a47510 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp @@ -37,9 +37,9 @@ ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* } -void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, - RegionData* data, size_t size, - size_t actual_free) { +void ShenandoahYoungHeuristics::select_collection_set_regions(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) { // See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(): // we do the same here, but with the following adjustments for generational mode: // @@ -54,15 +54,13 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah // Better select garbage-first regions QuickSort::sort(data, size, compare_by_garbage); - size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size); - - choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage); + choose_young_collection_set(cset, data, size, actual_free); // Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be // enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still // young-gen reserve available following selection of the young-gen collection set, see if we can use // this memory to expand the old-gen evacuation collection set. 
- need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(_add_regions_to_old); + need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(cset, _add_regions_to_old); if (need_to_finalize_mixed) { heap->old_generation()->heuristics()->finalize_mixed_evacs(); } @@ -70,8 +68,7 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset, const RegionData* data, - size_t size, size_t actual_free, - size_t cur_young_garbage) const { + size_t size, size_t actual_free) const { const auto heap = ShenandoahGenerationalHeap::heap(); @@ -82,23 +79,23 @@ void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollection // This is young-gen collection or a mixed evacuation. // If this is mixed evacuation, the old-gen candidate regions have already been added. size_t cur_cset = 0; + size_t cur_young_garbage = cset->garbage(); const size_t max_cset = (size_t) (heap->young_generation()->get_evacuation_reserve() / ShenandoahEvacWaste); const size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset; const size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; - log_info(gc, ergo)( - "Adaptive CSet Selection for YOUNG. Max Evacuation: %zu%s, Actual Free: %zu%s.", - byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), - byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); + "Adaptive CSet Selection for YOUNG. 
Max Evacuation: " PROPERFMT ", Actual Free: " PROPERFMT, + PROPERFMTARGS(max_cset), PROPERFMTARGS(actual_free)); for (size_t idx = 0; idx < size; idx++) { ShenandoahHeapRegion* r = data[idx].get_region(); - if (cset->is_preselected(r->index())) { + if (cset->is_in(r) || r->get_top_before_promote() != nullptr) { + assert(heap->is_tenurable(r), "Region %zu already selected for promotion must be tenurable", idx); continue; } - // Note that we do not add tenurable regions if they were not pre-selected. They were not preselected + // Note that we do not add tenurable regions if they were not pre-selected. They were not selected // because there is insufficient room in old-gen to hold their to-be-promoted live objects or because // they are to be promoted in place. if (!heap->is_tenurable(r)) { @@ -137,6 +134,7 @@ bool ShenandoahYoungHeuristics::should_start_gc() { // inherited triggers have already decided to start a cycle, so no further evaluation is required if (ShenandoahAdaptiveHeuristics::should_start_gc()) { + // ShenandoahAdaptiveHeuristics::should_start_gc() has already accepted trigger, or declined it. return true; } @@ -178,11 +176,9 @@ size_t ShenandoahYoungHeuristics::bytes_of_allocation_runway_before_gc_trigger(s size_t capacity = _space_info->max_capacity(); size_t usage = _space_info->used(); size_t available = (capacity > usage)? 
capacity - usage: 0; - size_t allocated = _space_info->bytes_allocated_since_gc_start(); + size_t allocated = _free_set->get_bytes_allocated_since_gc_start(); + size_t anticipated_available = available + young_regions_to_be_reclaimed * ShenandoahHeapRegion::region_size_bytes(); - size_t available_young_collected = ShenandoahHeap::heap()->collection_set()->get_young_available_bytes_collected(); - size_t anticipated_available = - available + young_regions_to_be_reclaimed * ShenandoahHeapRegion::region_size_bytes() - available_young_collected; size_t spike_headroom = capacity * ShenandoahAllocSpikeFactor / 100; size_t penalties = capacity * _gc_time_penalties / 100; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp index b9d64059680..8fabc40693c 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp @@ -38,9 +38,9 @@ public: explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation); - void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, - RegionData* data, size_t size, - size_t actual_free) override; + void select_collection_set_regions(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) override; bool should_start_gc() override; @@ -49,8 +49,7 @@ public: private: void choose_young_collection_set(ShenandoahCollectionSet* cset, const RegionData* data, - size_t size, size_t actual_free, - size_t cur_young_garbage) const; + size_t size, size_t actual_free) const; }; diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp index 5ef21719ed4..1c2c15c40dc 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp @@ -26,7 +26,6 @@ #include 
"gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" #include "gc/shenandoah/mode/shenandoahMode.hpp" diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp index 41b2703730b..cc098bc5a21 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp @@ -50,7 +50,6 @@ void ShenandoahPassiveMode::initialize_flags() const { SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier); - SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStackWatermarkBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCardBarrier); } diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp index 7ac2d7b818f..e27aa90542d 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp @@ -42,6 +42,5 @@ void ShenandoahSATBMode::initialize_flags() const { SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier); SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); - SHENANDOAH_CHECK_FLAG_SET(ShenandoahStackWatermarkBarrier); SHENANDOAH_CHECK_FLAG_UNSET(ShenandoahCardBarrier); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp index 86ff6f22c72..a81efa99d70 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp @@ -57,21 +57,13 @@ 
ShenandoahAgeCensus::ShenandoahAgeCensus(uint max_workers) // Sentinel value _tenuring_threshold[i] = MAX_COHORTS; } - if (ShenandoahGenerationalAdaptiveTenuring) { - _local_age_tables = NEW_C_HEAP_ARRAY(AgeTable*, _max_workers, mtGC); - CENSUS_NOISE(_local_noise = NEW_C_HEAP_ARRAY(ShenandoahNoiseStats, max_workers, mtGC);) - for (uint i = 0; i < _max_workers; i++) { - _local_age_tables[i] = new AgeTable(false); - CENSUS_NOISE(_local_noise[i].clear();) - } - } else { - _local_age_tables = nullptr; + _local_age_tables = NEW_C_HEAP_ARRAY(AgeTable*, _max_workers, mtGC); + CENSUS_NOISE(_local_noise = NEW_C_HEAP_ARRAY(ShenandoahNoiseStats, max_workers, mtGC);) + for (uint i = 0; i < _max_workers; i++) { + _local_age_tables[i] = new AgeTable(false); + CENSUS_NOISE(_local_noise[i].clear();) } _epoch = MAX_SNAPSHOTS - 1; // see prepare_for_census_update() - - if (!ShenandoahGenerationalAdaptiveTenuring) { - _tenuring_threshold[_epoch] = InitialTenuringThreshold; - } } ShenandoahAgeCensus::~ShenandoahAgeCensus() { @@ -154,7 +146,6 @@ void ShenandoahAgeCensus::prepare_for_census_update() { // and compute the new tenuring threshold. void ShenandoahAgeCensus::update_census(size_t age0_pop) { prepare_for_census_update(); - assert(ShenandoahGenerationalAdaptiveTenuring, "Only update census when adaptive tenuring is enabled"); assert(_global_age_tables[_epoch]->is_clear(), "Dirty decks"); CENSUS_NOISE(assert(_global_noise[_epoch].is_clear(), "Dirty decks");) @@ -180,6 +171,15 @@ void ShenandoahAgeCensus::update_census(size_t age0_pop) { NOT_PRODUCT(update_total();) } +size_t ShenandoahAgeCensus::get_tenurable_bytes(const uint tenuring_threshold) const { + assert(_epoch < MAX_SNAPSHOTS, "Out of bounds"); + size_t total = 0; + const AgeTable* pv = _global_age_tables[_epoch]; + for (uint i = tenuring_threshold; i < MAX_COHORTS; i++) { + total += pv->sizes[i]; + } + return total * HeapWordSize; +} // Reset the epoch for the global age tables, // clearing all history. 
@@ -195,10 +195,6 @@ void ShenandoahAgeCensus::reset_global() { // Reset the local age tables, clearing any partial census. void ShenandoahAgeCensus::reset_local() { - if (!ShenandoahGenerationalAdaptiveTenuring) { - assert(_local_age_tables == nullptr, "Error"); - return; - } for (uint i = 0; i < _max_workers; i++) { _local_age_tables[i]->clear(); CENSUS_NOISE(_local_noise[i].clear();) @@ -221,10 +217,6 @@ bool ShenandoahAgeCensus::is_clear_global() { // Is local census information clear? bool ShenandoahAgeCensus::is_clear_local() { - if (!ShenandoahGenerationalAdaptiveTenuring) { - assert(_local_age_tables == nullptr, "Error"); - return true; - } for (uint i = 0; i < _max_workers; i++) { bool clear = _local_age_tables[i]->is_clear(); CENSUS_NOISE(clear |= _local_noise[i].is_clear();) @@ -258,7 +250,6 @@ void ShenandoahAgeCensus::update_total() { #endif // !PRODUCT void ShenandoahAgeCensus::update_tenuring_threshold() { - assert(ShenandoahGenerationalAdaptiveTenuring, "Only update when adaptive tenuring is enabled"); uint tt = compute_tenuring_threshold(); assert(tt <= MAX_COHORTS, "Out of bounds"); _tenuring_threshold[_epoch] = tt; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp index 39ea4ee9002..c140f445e21 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp @@ -97,6 +97,8 @@ struct ShenandoahNoiseStats { // once the per-worker data is consolidated into the appropriate population vector // per minor collection. The _local_age_table is thus C x N, for N GC workers. 
class ShenandoahAgeCensus: public CHeapObj { + friend class ShenandoahTenuringOverride; + AgeTable** _global_age_tables; // Global age tables used for adapting tenuring threshold, one per snapshot AgeTable** _local_age_tables; // Local scratch age tables to track object ages, one per worker @@ -148,6 +150,10 @@ class ShenandoahAgeCensus: public CHeapObj { return _tenuring_threshold[prev]; } + // Override the tenuring threshold for the current epoch. This is used to + // cause everything to be promoted for a whitebox full gc request. + void set_tenuring_threshold(uint threshold) { _tenuring_threshold[_epoch] = threshold; } + #ifndef PRODUCT // Return the sum of size of objects of all ages recorded in the // census at snapshot indexed by snap. @@ -173,7 +179,6 @@ class ShenandoahAgeCensus: public CHeapObj { ~ShenandoahAgeCensus(); // Return the local age table (population vector) for worker_id. - // Only used in the case of ShenandoahGenerationalAdaptiveTenuring AgeTable* get_local_age_table(uint worker_id) const { return _local_age_tables[worker_id]; } @@ -211,6 +216,12 @@ class ShenandoahAgeCensus: public CHeapObj { // allocated when the concurrent marking was in progress. void update_census(size_t age0_pop); + // Return the total size of the population at or above the given threshold for the current epoch + size_t get_tenurable_bytes(uint tenuring_threshold) const; + + // As above, but use the current tenuring threshold + size_t get_tenurable_bytes() const { return get_tenurable_bytes(tenuring_threshold()); } + // Reset the epoch, clearing accumulated census history // Note: this isn't currently used, but reserved for planned // future usage. @@ -233,4 +244,26 @@ class ShenandoahAgeCensus: public CHeapObj { void print(); }; +// RAII object that temporarily overrides the tenuring threshold for the +// duration of a scope, restoring the original value on destruction. +// Used to force promotion of all young objects during whitebox full GCs. 
+class ShenandoahTenuringOverride : public StackObj { + ShenandoahAgeCensus* _census; + uint _saved_threshold; + bool _active; +public: + ShenandoahTenuringOverride(bool active, ShenandoahAgeCensus* census) : + _census(census), _saved_threshold(0), _active(active) { + if (_active) { + _saved_threshold = _census->tenuring_threshold(); + _census->set_tenuring_threshold(0); + } + } + ~ShenandoahTenuringOverride() { + if (_active) { + _census->set_tenuring_threshold(_saved_threshold); + } + } +}; + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHAGECENSUS_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp index c1fa4b964b7..e9d6a686694 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -200,6 +200,7 @@ void ShenandoahArguments::initialize() { && strcmp(ShenandoahGCHeuristics, "adaptive") != 0) { log_warning(gc)("Ignoring -XX:ShenandoahGCHeuristics input: %s, because generational shenandoah only" " supports adaptive heuristics", ShenandoahGCHeuristics); + FLAG_SET_ERGO(ShenandoahGCHeuristics, "adaptive"); } FullGCForwarding::initialize_flags(MaxHeapSize); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index baeaffb9c7b..268f5b13035 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -559,26 +559,24 @@ bool ShenandoahAsserts::extract_klass_safely(oop obj, narrowKlass& nk, const Kla if (!os::is_readable_pointer(obj)) { return false; } - if (UseCompressedClassPointers) { - if (UseCompactObjectHeaders) { // look in forwardee - markWord mark = obj->mark(); - if (mark.is_marked()) { - oop fwd = cast_to_oop(mark.clear_lock_bits().to_pointer()); - if (!os::is_readable_pointer(fwd)) { - return false; - } - mark = fwd->mark(); + + if (UseCompactObjectHeaders) { // look in 
forwardee + markWord mark = obj->mark(); + if (mark.is_marked()) { + oop fwd = cast_to_oop(mark.clear_lock_bits().to_pointer()); + if (!os::is_readable_pointer(fwd)) { + return false; } - nk = mark.narrow_klass(); - } else { - nk = obj->narrow_klass(); + mark = fwd->mark(); } - if (!CompressedKlassPointers::is_valid_narrow_klass_id(nk)) { - return false; - } - k = CompressedKlassPointers::decode_not_null_without_asserts(nk); + nk = mark.narrow_klass(); } else { - k = obj->klass(); + nk = obj->narrow_klass(); } + if (!CompressedKlassPointers::is_valid_narrow_klass_id(nk)) { + return false; + } + k = CompressedKlassPointers::decode_not_null_without_asserts(nk); + return k != nullptr; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp index 004558a9fa8..0949959b042 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp @@ -149,11 +149,9 @@ void ShenandoahBarrierSet::on_thread_attach(Thread *thread) { BarrierSetNMethod* bs_nm = barrier_set_nmethod(); thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value()); - if (ShenandoahStackWatermarkBarrier) { - JavaThread* const jt = JavaThread::cast(thread); - StackWatermark* const watermark = new ShenandoahStackWatermark(jt); - StackWatermarkSet::add_watermark(jt, watermark); - } + JavaThread* const jt = JavaThread::cast(thread); + StackWatermark* const watermark = new ShenandoahStackWatermark(jt); + StackWatermarkSet::add_watermark(jt, watermark); } } @@ -166,21 +164,18 @@ void ShenandoahBarrierSet::on_thread_detach(Thread *thread) { gclab->retire(); } - PLAB* plab = ShenandoahThreadLocalData::plab(thread); - if (plab != nullptr) { - // This will assert if plab is not null in non-generational mode - ShenandoahGenerationalHeap::heap()->retire_plab(plab); + ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + if 
(shenandoah_plab != nullptr) { + shenandoah_plab->retire(); } // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC - if (ShenandoahStackWatermarkBarrier) { - if (_heap->is_concurrent_mark_in_progress()) { - ShenandoahKeepAliveClosure oops; - StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc); - } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) { - ShenandoahContextEvacuateUpdateRootsClosure oops; - StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc); - } + if (_heap->is_concurrent_mark_in_progress()) { + ShenandoahKeepAliveClosure oops; + StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc); + } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) { + ShenandoahContextEvacuateUpdateRootsClosure oops; + StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc); } } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp index fefed0340c4..9ab45380c61 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp @@ -180,9 +180,6 @@ public: }; class ShenandoahNMethodAndDisarmClosure : public NMethodToOopClosure { -private: - BarrierSetNMethod* const _bs; - public: inline ShenandoahNMethodAndDisarmClosure(OopClosure* cl); inline void do_nmethod(nmethod* nm); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp index e8d25b1e5a9..96ecbad1145 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp @@ -209,15 +209,13 @@ void ShenandoahCleanUpdateWeakOopsClosure::do_oo } 
ShenandoahNMethodAndDisarmClosure::ShenandoahNMethodAndDisarmClosure(OopClosure* cl) : - NMethodToOopClosure(cl, true /* fix_relocations */), - _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) { -} + NMethodToOopClosure(cl, true /* fix_relocations */) {} void ShenandoahNMethodAndDisarmClosure::do_nmethod(nmethod* nm) { assert(nm != nullptr, "Sanity"); assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here"); NMethodToOopClosure::do_nmethod(nm); - _bs->disarm(nm); + ShenandoahNMethod::disarm_nmethod(nm); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp index 64e135e9a4e..7cf60cdf65c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp @@ -40,20 +40,6 @@ ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table; int ShenandoahCodeRoots::_disarmed_value = 1; -bool ShenandoahCodeRoots::use_nmethod_barriers_for_mark() { - // Continuations need nmethod barriers for scanning stack chunk nmethods. - if (Continuations::enabled()) return true; - - // Concurrent class unloading needs nmethod barriers. - // When a nmethod is about to be executed, we need to make sure that all its - // metadata are marked. The alternative is to remark thread roots at final mark - // pause, which would cause latency issues. - if (ShenandoahHeap::heap()->unload_classes()) return true; - - // Otherwise, we can go without nmethod barriers. 
- return false; -} - void ShenandoahCodeRoots::initialize() { _nmethod_table = new ShenandoahNMethodTable(); } @@ -68,27 +54,14 @@ void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) { _nmethod_table->unregister_nmethod(nm); } -void ShenandoahCodeRoots::arm_nmethods_for_mark() { - if (use_nmethod_barriers_for_mark()) { - BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods(); - } -} - -void ShenandoahCodeRoots::arm_nmethods_for_evac() { +void ShenandoahCodeRoots::arm_nmethods() { BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods(); } class ShenandoahDisarmNMethodClosure : public NMethodClosure { -private: - BarrierSetNMethod* const _bs; - public: - ShenandoahDisarmNMethodClosure() : - _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) { - } - virtual void do_nmethod(nmethod* nm) { - _bs->disarm(nm); + ShenandoahNMethod::disarm_nmethod(nm); } }; @@ -111,10 +84,8 @@ public: }; void ShenandoahCodeRoots::disarm_nmethods() { - if (use_nmethod_barriers_for_mark()) { - ShenandoahDisarmNMethodsTask task; - ShenandoahHeap::heap()->workers()->run_task(&task); - } + ShenandoahDisarmNMethodsTask task; + ShenandoahHeap::heap()->workers()->run_task(&task); } class ShenandoahNMethodUnlinkClosure : public NMethodClosure { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp index d29c446f210..d395b4516f4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp @@ -67,14 +67,11 @@ public: // Concurrent nmethod unloading support static void unlink(WorkerThreads* workers, bool unloading_occurred); static void purge(); - static void arm_nmethods_for_mark(); - static void arm_nmethods_for_evac(); + static void arm_nmethods(); static void disarm_nmethods(); static int disarmed_value() { return _disarmed_value; } static int* disarmed_value_address() { return &_disarmed_value; } - static bool 
use_nmethod_barriers_for_mark(); - private: static ShenandoahNMethodTable* _nmethod_table; static int _disarmed_value; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index c1c6b876d90..d2a489a15be 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -49,7 +49,6 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS _live(0), _region_count(0), _old_garbage(0), - _preselected_regions(nullptr), _young_available_bytes_collected(0), _old_available_bytes_collected(0), _current_index(0) { @@ -136,7 +135,7 @@ void ShenandoahCollectionSet::clear() { _live = 0; _region_count = 0; - _current_index = 0; + _current_index.store_relaxed(0); _young_bytes_to_evacuate = 0; _young_bytes_to_promote = 0; @@ -154,11 +153,11 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { // before hitting the (potentially contended) atomic index. size_t max = _heap->num_regions(); - size_t old = AtomicAccess::load(&_current_index); + size_t old = _current_index.load_relaxed(); for (size_t index = old; index < max; index++) { if (is_in(index)) { - size_t cur = AtomicAccess::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed); + size_t cur = _current_index.compare_exchange(old, index + 1, memory_order_relaxed); assert(cur >= old, "Always move forward"); if (cur == old) { // Successfully moved the claim index, this is our region. 
@@ -178,9 +177,9 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::next() { assert(Thread::current()->is_VM_thread(), "Must be VMThread"); size_t max = _heap->num_regions(); - for (size_t index = _current_index; index < max; index++) { + for (size_t index = _current_index.load_relaxed(); index < max; index++) { if (is_in(index)) { - _current_index = index + 1; + _current_index.store_relaxed(index + 1); return _heap->get_region(index); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp index c99271de1fb..7722423709d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp @@ -32,16 +32,10 @@ #include "memory/allocation.hpp" #include "memory/reservedSpace.hpp" #include "memory/virtualspace.hpp" +#include "runtime/atomic.hpp" class ShenandoahCollectionSet : public CHeapObj { friend class ShenandoahHeap; - friend class ShenandoahCollectionSetPreselector; - - void establish_preselected(bool *preselected) { - assert(_preselected_regions == nullptr, "Over-writing"); - _preselected_regions = preselected; - } - void abandon_preselected() { _preselected_regions = nullptr; } private: size_t const _map_size; @@ -66,11 +60,6 @@ private: // How many bytes of old garbage are present in a mixed collection set? size_t _old_garbage; - // Points to array identifying which tenure-age regions have been preselected - // for inclusion in collection set. This field is only valid during brief - // spans of time while collection set is being constructed. - bool* _preselected_regions; - // When a region having memory available to be allocated is added to the collection set, the region's available memory // should be subtracted from what's available. 
size_t _young_available_bytes_collected; @@ -80,7 +69,7 @@ private: size_t _old_available_bytes_collected; shenandoah_padding(0); - volatile size_t _current_index; + Atomic _current_index; shenandoah_padding(1); public: @@ -99,7 +88,7 @@ public: bool is_empty() const { return _region_count == 0; } void clear_current_index() { - _current_index = 0; + _current_index.store_relaxed(0); } inline bool is_in(ShenandoahHeapRegion* r) const; @@ -131,16 +120,6 @@ public: // Returns the amount of garbage in old regions in the collection set. inline size_t get_old_garbage() const; - bool is_preselected(size_t region_idx) { - assert(_preselected_regions != nullptr, "Missing establish after abandon"); - return _preselected_regions[region_idx]; - } - - bool* preselected_regions() { - assert(_preselected_regions != nullptr, "Null ptr"); - return _preselected_regions; - } - bool has_old_regions() const { return _has_old_regions; } size_t used() const { return _used; } size_t live() const { return _live; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp index bbd9dca1513..cfa79fc055e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -257,10 +257,10 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { out->print_cr("%5zu Full GCs (%.2f%%)", _success_full_gcs, percent_of(_success_full_gcs, completed_gcs)); if (!ExplicitGCInvokesConcurrent) { - out->print_cr(" %5zu invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); + out->print_cr(" %5zu invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_full_gcs)); } if (!ShenandoahImplicitGCInvokesConcurrent) { - out->print_cr(" %5zu invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); + 
out->print_cr(" %5zu invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_full_gcs)); } out->print_cr(" %5zu caused by allocation failure (%.2f%%)", _alloc_failure_full, percent_of(_alloc_failure_full, _success_full_gcs)); out->print_cr(" %5zu upgraded from Degenerated GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, _success_full_gcs)); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 5206a0558e8..6723bb89021 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -112,6 +112,24 @@ void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap heap->concurrent_prepare_for_update_refs(); } +void ShenandoahConcurrentGC::entry_update_card_table() { + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + + static const char* msg = "Concurrent update cards"; + ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_card_table); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(heap->workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_evac(), + "concurrent update cards"); + + // Heap needs to be parsable here. + // Also, parallel heap region iterate must have a phase set. + assert(ShenandoahTimingsTracker::is_current_phase_valid(), "Current phase must be set"); + ShenandoahGenerationalHeap::heap()->old_generation()->update_card_table(); +} + bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { ShenandoahHeap* const heap = ShenandoahHeap::heap(); _generation->ref_processor()->set_soft_reference_policy( @@ -206,6 +224,11 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // Perform update-refs phase. 
entry_concurrent_update_refs_prepare(heap); + + if (ShenandoahHeap::heap()->mode()->is_generational()) { + entry_update_card_table(); + } + if (ShenandoahVerify) { vmop_entry_init_update_refs(); } @@ -232,8 +255,10 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { return false; } - if (VerifyAfterGC) { - vmop_entry_verify_final_roots(); + // In normal cycle, final-update-refs would verify at the end of the cycle. + // In abbreviated cycle, we need to verify separately. + if (ShenandoahVerify) { + vmop_entry_final_verify(); } } @@ -321,14 +346,14 @@ void ShenandoahConcurrentGC::vmop_entry_final_update_refs() { VMThread::execute(&op); } -void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() { +void ShenandoahConcurrentGC::vmop_entry_final_verify() { ShenandoahHeap* const heap = ShenandoahHeap::heap(); TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters()); - ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross); + ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_verify_gross); // This phase does not use workers, no need for setup heap->try_inject_alloc_failure(); - VM_ShenandoahFinalRoots op(this); + VM_ShenandoahFinalVerify op(this); VMThread::execute(&op); } @@ -377,12 +402,12 @@ void ShenandoahConcurrentGC::entry_final_update_refs() { op_final_update_refs(); } -void ShenandoahConcurrentGC::entry_verify_final_roots() { - const char* msg = verify_final_roots_event_message(); - ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots); +void ShenandoahConcurrentGC::entry_final_verify() { + const char* msg = verify_final_event_message(); + ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_verify); EventMark em("%s", msg); - op_verify_final_roots(); + op_verify_final(); } void ShenandoahConcurrentGC::entry_reset() { @@ -721,9 +746,8 @@ void ShenandoahConcurrentGC::op_init_mark() { // Make above changes visible to worker threads OrderAccess::fence(); - // Arm 
nmethods for concurrent mark - ShenandoahCodeRoots::arm_nmethods_for_mark(); - + // Arm nmethods/stack for concurrent processing + ShenandoahCodeRoots::arm_nmethods(); ShenandoahStackWatermark::change_epoch_id(); { @@ -782,7 +806,7 @@ void ShenandoahConcurrentGC::op_final_mark() { heap->set_has_forwarded_objects(true); // Arm nmethods/stack for concurrent processing - ShenandoahCodeRoots::arm_nmethods_for_evac(); + ShenandoahCodeRoots::arm_nmethods(); ShenandoahStackWatermark::change_epoch_id(); } else { @@ -1012,14 +1036,10 @@ void ShenandoahConcurrentGC::op_class_unloading() { class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure { private: - BarrierSetNMethod* const _bs; ShenandoahEvacuateUpdateMetadataClosure _cl; public: - ShenandoahEvacUpdateCodeCacheClosure() : - _bs(BarrierSet::barrier_set()->barrier_set_nmethod()), - _cl() { - } + ShenandoahEvacUpdateCodeCacheClosure() : _cl() {} void do_nmethod(nmethod* n) { ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n); @@ -1027,8 +1047,8 @@ public: // Setup EvacOOM scope below reentrant lock to avoid deadlock with // nmethod_entry_barrier ShenandoahEvacOOMScope oom; - data->oops_do(&_cl, true/*fix relocation*/); - _bs->disarm(n); + data->oops_do(&_cl, /* fix_relocations = */ true); + ShenandoahNMethod::disarm_nmethod(n); } }; @@ -1215,6 +1235,7 @@ void ShenandoahConcurrentGC::op_final_update_refs() { } heap->rebuild_free_set(true /*concurrent*/); + _generation->heuristics()->start_idle_span(); { ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state); @@ -1244,10 +1265,10 @@ bool ShenandoahConcurrentGC::entry_final_roots() { return true; } -void ShenandoahConcurrentGC::op_verify_final_roots() { - if (VerifyAfterGC) { - Universe::verify(); - } +void ShenandoahConcurrentGC::op_verify_final() { + assert(ShenandoahVerify, "Should have been checked before"); + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + heap->verifier()->verify_after_gc(_generation); } 
void ShenandoahConcurrentGC::op_cleanup_complete() { @@ -1332,11 +1353,11 @@ const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() con } } -const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const { +const char* ShenandoahConcurrentGC::verify_final_event_message() const { if (ShenandoahHeap::heap()->unload_classes()) { - SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)"); + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final", " (unload classes)"); } else { - SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", ""); + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final", ""); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp index 54d43416fdb..fde585b4aa9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp @@ -43,7 +43,7 @@ class ShenandoahConcurrentGC : public ShenandoahGC { friend class VM_ShenandoahFinalMarkStartEvac; friend class VM_ShenandoahInitUpdateRefs; friend class VM_ShenandoahFinalUpdateRefs; - friend class VM_ShenandoahFinalRoots; + friend class VM_ShenandoahFinalVerify; protected: ShenandoahConcurrentMark _mark; @@ -59,8 +59,6 @@ public: bool collect(GCCause::Cause cause) override; ShenandoahDegenPoint degen_point() const; - void entry_concurrent_update_refs_prepare(ShenandoahHeap* heap); - // Return true if this cycle found enough immediate garbage to skip evacuation bool abbreviated() const { return _abbreviated; } @@ -71,7 +69,7 @@ protected: void vmop_entry_final_mark(); void vmop_entry_init_update_refs(); void vmop_entry_final_update_refs(); - void vmop_entry_verify_final_roots(); + void vmop_entry_final_verify(); // Entry methods to normally STW GC operations. 
These set up logging, monitoring // and workers for next VM operation @@ -79,7 +77,7 @@ protected: void entry_final_mark(); void entry_init_update_refs(); void entry_final_update_refs(); - void entry_verify_final_roots(); + void entry_final_verify(); // Entry methods to normally concurrent GC operations. These set up logging, monitoring // for concurrent operation. @@ -95,6 +93,8 @@ protected: void entry_cleanup_early(); void entry_evacuate(); void entry_update_thread_roots(); + void entry_update_card_table(); + void entry_concurrent_update_refs_prepare(ShenandoahHeap* heap); void entry_update_refs(); void entry_cleanup_complete(); @@ -122,7 +122,7 @@ protected: void op_update_thread_roots(); void op_final_update_refs(); - void op_verify_final_roots(); + void op_verify_final(); void op_cleanup_complete(); void op_reset_after_collect(); @@ -143,7 +143,7 @@ private: // passing around the logging/tracing systems const char* init_mark_event_message() const; const char* final_mark_event_message() const; - const char* verify_final_roots_event_message() const; + const char* verify_final_event_message() const; const char* conc_final_roots_event_message() const; const char* conc_mark_event_message() const; const char* conc_reset_event_message() const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index bc11659c5e5..6175f15676c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -45,7 +45,7 @@ ShenandoahControlThread::ShenandoahControlThread() : _requested_gc_cause(GCCause::_no_gc), _degen_point(ShenandoahGC::_degenerated_outside_cycle), _control_lock(CONTROL_LOCK_RANK, "ShenandoahControl_lock", true) { - set_name("Shenandoah Control Thread"); + set_name("ShenControl"); create_and_start(); } @@ -59,6 +59,7 @@ void ShenandoahControlThread::run_service() { ShenandoahCollectorPolicy* const policy = 
heap->shenandoah_policy(); ShenandoahHeuristics* const heuristics = heap->heuristics(); + double most_recent_wake_time = os::elapsedTime(); while (!should_terminate()) { const GCCause::Cause cancelled_cause = heap->cancelled_cause(); if (cancelled_cause == GCCause::_shenandoah_stop_vm) { @@ -137,7 +138,10 @@ void ShenandoahControlThread::run_service() { heuristics->cancel_trigger_request(); - heap->reset_bytes_allocated_since_gc_start(); + if (mode != stw_degenerated) { + // If mode is stw_degenerated, count bytes allocated from the start of the conc GC that experienced alloc failure. + heap->reset_bytes_allocated_since_gc_start(); + } MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics(); @@ -222,16 +226,26 @@ void ShenandoahControlThread::run_service() { // Wait before performing the next action. If allocation happened during this wait, // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle, // back off exponentially. - const double current = os::elapsedTime(); + const double before_sleep = most_recent_wake_time; if (heap->has_changed()) { sleep = ShenandoahControlIntervalMin; - } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ + } else if ((before_sleep - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ sleep = MIN2(ShenandoahControlIntervalMax, MAX2(1, sleep * 2)); - last_sleep_adjust_time = current; + last_sleep_adjust_time = before_sleep; } - MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); ml.wait(sleep); + // Record a conservative estimate of the longest anticipated sleep duration until we sample again. 
+ double planned_sleep_interval = MIN2(ShenandoahControlIntervalMax, MAX2(1, sleep * 2)) / 1000.0; + most_recent_wake_time = os::elapsedTime(); + heuristics->update_should_start_query_times(most_recent_wake_time, planned_sleep_interval); + if (LogTarget(Debug, gc, thread)::is_enabled()) { + double elapsed = most_recent_wake_time - before_sleep; + double hiccup = elapsed - double(sleep); + if (hiccup > 0.001) { + log_debug(gc, thread)("Control Thread hiccup time: %.3fs", hiccup); + } + } } } @@ -332,7 +346,8 @@ void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) { void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) { assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); ShenandoahHeap* const heap = ShenandoahHeap::heap(); - ShenandoahGCSession session(cause, heap->global_generation()); + ShenandoahGCSession session(cause, heap->global_generation(), true, + point == ShenandoahGC::ShenandoahDegenPoint::_degenerated_outside_cycle); heap->increment_total_collections(false); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp index 220f3df8d4f..0096aad2570 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp @@ -23,19 +23,19 @@ * */ +#include "gc/shared/allocTracer.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahController.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" - void ShenandoahController::update_gc_id() { - AtomicAccess::inc(&_gc_id); + _gc_id.add_then_fetch((size_t)1); } size_t ShenandoahController::get_gc_id() { - return AtomicAccess::load(&_gc_id); + return _gc_id.load_relaxed(); } void ShenandoahController::handle_alloc_failure(const 
ShenandoahAllocRequest& req, bool block) { @@ -45,10 +45,12 @@ void ShenandoahController::handle_alloc_failure(const ShenandoahAllocRequest& re const GCCause::Cause cause = is_humongous ? GCCause::_shenandoah_humongous_allocation_failure : GCCause::_allocation_failure; ShenandoahHeap* const heap = ShenandoahHeap::heap(); + size_t req_byte = req.size() * HeapWordSize; if (heap->cancel_gc(cause)) { - log_info(gc)("Failed to allocate %s, " PROPERFMT, req.type_string(), PROPERFMTARGS(req.size() * HeapWordSize)); + log_info(gc)("Failed to allocate %s, " PROPERFMT, req.type_string(), PROPERFMTARGS(req_byte)); request_gc(cause); } + AllocTracer::send_allocation_requiring_gc_event(req_byte, checked_cast(get_gc_id())); if (block) { MonitorLocker ml(&_alloc_failure_waiters_lock); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.hpp b/src/hotspot/share/gc/shenandoah/shenandoahController.hpp index b8ff4df4771..60b41a5fe99 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.hpp @@ -29,6 +29,7 @@ #include "gc/shared/gcCause.hpp" #include "gc/shenandoah/shenandoahAllocRequest.hpp" #include "gc/shenandoah/shenandoahSharedVariables.hpp" +#include "runtime/atomic.hpp" /** * This interface exposes methods necessary for the heap to interact @@ -38,7 +39,7 @@ class ShenandoahController: public ConcurrentGCThread { private: shenandoah_padding(0); // A monotonically increasing GC count. 
- volatile size_t _gc_id; + Atomic _gc_id; shenandoah_padding(1); protected: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index 99776e38bfe..84b22f13d47 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -55,7 +55,7 @@ bool ShenandoahDegenGC::collect(GCCause::Cause cause) { vmop_degenerated(); ShenandoahHeap* heap = ShenandoahHeap::heap(); if (heap->mode()->is_generational()) { - bool is_bootstrap_gc = heap->old_generation()->is_bootstrapping(); + bool is_bootstrap_gc = heap->young_generation()->is_bootstrap_cycle(); heap->mmu_tracker()->record_degenerated(GCId::current(), is_bootstrap_gc); const char* msg = is_bootstrap_gc? "At end of Degenerated Bootstrap Old GC": "At end of Degenerated Young GC"; heap->log_heap_status(msg); @@ -277,6 +277,11 @@ void ShenandoahDegenGC::op_degenerated() { _abbreviated = true; } + // labs are retired, walk the old regions and update remembered set + if (ShenandoahHeap::heap()->mode()->is_generational()) { + ShenandoahGenerationalHeap::heap()->old_generation()->update_card_table(); + } + case _degenerated_update_refs: if (heap->has_forwarded_objects()) { op_update_refs(); @@ -314,6 +319,7 @@ void ShenandoahDegenGC::op_degenerated() { if (progress) { heap->notify_gc_progress(); _generation->heuristics()->record_degenerated(); + heap->start_idle_span(); } else if (policy->should_upgrade_degenerated_gc()) { // Upgrade to full GC, register full-GC impact on heuristics. 
op_degenerated_futile(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 961800f20d9..1807383123b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -287,9 +287,25 @@ void ShenandoahFreeSet::resize_old_collector_capacity(size_t regions) { // else, old generation is already appropriately sized } + void ShenandoahFreeSet::reset_bytes_allocated_since_gc_start(size_t initial_bytes_allocated) { shenandoah_assert_heaplocked(); + // Future inquiries of get_total_bytes_allocated() will return the sum of + // _total_bytes_previously_allocated and _mutator_bytes_allocated_since_gc_start. + // Since _mutator_bytes_allocated_since_gc_start does not start at zero, we subtract initial_bytes_allocated so as + // to not double count these allocated bytes. + size_t original_mutator_bytes_allocated_since_gc_start = _mutator_bytes_allocated_since_gc_start; + + // Setting _mutator_bytes_allocated_since_gc_start before _total_bytes_previously_allocated reduces the damage + // in the case that the control or regulator thread queries get_bytes_allocated_since_previous_sample() between + // the two assignments. + // + // These are not declared as volatile so the compiler or hardware may reorder the assignments. The implementation of + // get_bytes_allocated_since_previous_cycle() is robust to this possibility, as are triggering heuristics. The current + // implementation assumes we are better off to tolerate the very rare race rather than impose a synchronization penalty + // on every update and fetch. (Perhaps it would be better to make the opposite tradeoff for improved maintainability.) 
_mutator_bytes_allocated_since_gc_start = initial_bytes_allocated; + _total_bytes_previously_allocated += original_mutator_bytes_allocated_since_gc_start - initial_bytes_allocated; } void ShenandoahFreeSet::increase_bytes_allocated(size_t bytes) { @@ -332,7 +348,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _leftmosts[partition_id] = _max; _rightmosts[partition_id] = -1; _leftmosts_empty[partition_id] = _max; - _rightmosts_empty[partition_id] = -1;; + _rightmosts_empty[partition_id] = -1; _capacity[partition_id] = 0; _region_counts[partition_id] = 0; _empty_region_counts[partition_id] = 0; @@ -429,7 +445,7 @@ void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetParti size_t ShenandoahRegionPartitions::get_humongous_waste(ShenandoahFreeSetPartitionId which_partition) { assert (which_partition < NumPartitions, "Partition must be valid"); - return _humongous_waste[int(which_partition)];; + return _humongous_waste[int(which_partition)]; } void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) { @@ -494,7 +510,7 @@ void ShenandoahRegionPartitions::decrease_available(ShenandoahFreeSetPartitionId size_t ShenandoahRegionPartitions::get_available(ShenandoahFreeSetPartitionId which_partition) { assert (which_partition < NumPartitions, "Partition must be valid"); - return _available[int(which_partition)];; + return _available[int(which_partition)]; } void ShenandoahRegionPartitions::increase_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { @@ -1211,6 +1227,8 @@ inline void ShenandoahRegionPartitions::assert_bounds_sanity() { ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), _partitions(max_regions, this), + _total_bytes_previously_allocated(0), + _mutator_bytes_at_last_sample(0), _total_humongous_waste(0), _alloc_bias_weight(0), _total_young_used(0), @@ -1551,7 +1569,7 @@ HeapWord* 
ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // We must call try_recycle_under_lock() even if !r->is_trash(). The reason is that if r is being recycled at this // moment by a GC worker thread, it may appear to be not trash even though it has not yet been fully recycled. If // we proceed without waiting for the worker to finish recycling the region, the worker thread may overwrite the - // region's affiliation with FREE after we set the region's affiliation to req.afiliation() below + // region's affiliation with FREE after we set the region's affiliation to req.affiliation() below r->try_recycle_under_lock(); in_new_region = r->is_empty(); if (in_new_region) { @@ -1567,7 +1585,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any // coalesce-and-fill processing. r->end_preemptible_coalesce_and_fill(); - _heap->old_generation()->clear_cards_for(r); } #ifdef ASSERT ShenandoahMarkingContext* const ctx = _heap->marking_context(); @@ -1676,9 +1693,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB:min_size(), retire this region. // Note that retire_from_partition() increases used to account for waste. - // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size, - // then retire the region so that subsequent searches can find available memory more quickly. 
- size_t idx = r->index(); size_t waste_bytes = _partitions.retire_from_partition(orig_partition, idx, r->used()); DEBUG_ONLY(boundary_changed = true;) @@ -1796,7 +1810,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo // found the match break; } - end++; } @@ -2036,7 +2049,8 @@ void ShenandoahFreeSet::clear_internal() { _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false); } -void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_regions, size_t &old_trashed_regions, +// Returns total allocatable words in Mutator partition +size_t ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_regions, size_t &old_trashed_regions, size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) { // This resets all state information, removing all regions from all sets. @@ -2054,6 +2068,8 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t region_size_bytes = _partitions.region_size_bytes(); size_t max_regions = _partitions.max(); + size_t mutator_alloc_capacity_in_words = 0; + size_t mutator_leftmost = max_regions; size_t mutator_rightmost = 0; size_t mutator_leftmost_empty = max_regions; @@ -2123,6 +2139,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r if (region->is_trash() || !region->is_old()) { // Both young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator); + mutator_alloc_capacity_in_words += ac / HeapWordSize; if (idx < mutator_leftmost) { mutator_leftmost = idx; } @@ -2279,6 +2296,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), 
_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + return mutator_alloc_capacity_in_words; } void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector(size_t xfer_regions, @@ -2583,19 +2601,20 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t clear(); log_debug(gc, free)("Rebuilding FreeSet"); - // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the - // mutator set otherwise. All trashed (cset) regions are affiliated young and placed in mutator set. - find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, - first_old_region, last_old_region, old_region_count); + // Place regions that have alloc_capacity into the old_collector set if they identify as is_old() or the + // mutator set otherwise. All trashed (cset) regions are affiliated young and placed in mutator set. Save the + // allocatable words in mutator partition in state variable. + _prepare_to_rebuild_mutator_free = find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, + first_old_region, last_old_region, old_region_count); } - -void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) { +// Return mutator free +void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count) { shenandoah_assert_heaplocked(); size_t young_reserve(0), old_reserve(0); if (_heap->mode()->is_generational()) { - compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve); + compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, young_reserve, old_reserve); } else { young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve; old_reserve = 0; @@ -2659,8 +2678,11 @@ void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size * 1. 
Memory currently available within old and young * 2. Trashed regions currently residing in young and old, which will become available momentarily * 3. The value of old_generation->get_region_balance() which represents the number of regions that we plan - * to transfer from old generation to young generation. Prior to each invocation of compute_young_and_old_reserves(), - * this value should computed by ShenandoahGenerationalHeap::compute_old_generation_balance(). + * to transfer from old generation to young generation. At the end of each GC cycle, we reset region_balance + * to zero. As we prepare to rebuild free set at the end of update-refs, we call + * ShenandoahGenerationalHeap::compute_old_generation_balance() to compute a new value of region_balance. + * This allows us to expand or shrink the size of the Old Collector reserves based on anticipated needs of + * the next GC cycle. */ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions, size_t& young_reserve_result, size_t& old_reserve_result) const { @@ -2744,10 +2766,13 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi // into the collector set or old collector set in order to assure that the memory available for allocations within // the collector set is at least to_reserve and the memory available for allocations within the old collector set // is at least to_reserve_old. -void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count, - size_t &young_used_regions, size_t &old_used_regions, - size_t &young_used_bytes, size_t &old_used_bytes) { +// +// Returns total mutator alloc capacity, in words. 
+size_t ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count, + size_t &young_used_regions, size_t &old_used_regions, + size_t &young_used_bytes, size_t &old_used_bytes) { const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t mutator_allocatable_words = _prepare_to_rebuild_mutator_free; young_used_regions = 0; old_used_regions = 0; @@ -2776,7 +2801,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old size_t empty_regions_to_collector = 0; size_t empty_regions_to_old_collector = 0; - size_t old_collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);; + size_t old_collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector); size_t collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); for (size_t i = _heap->num_regions(); i > 0; i--) { @@ -2825,6 +2850,8 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); old_region_count++; + assert(ac == ShenandoahHeapRegion::region_size_bytes(), "Cannot move to old unless entire region is in alloc capacity"); + mutator_allocatable_words -= ShenandoahHeapRegion::region_size_words(); continue; } } @@ -2868,8 +2895,10 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old " Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector)); + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + + mutator_allocatable_words -= ac / 
HeapWordSize; continue; } @@ -2977,6 +3006,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve)); } } + return mutator_allocatable_words; } void ShenandoahFreeSet::establish_old_collector_alloc_bias() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index d55a06d5713..eeff0fde87c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -437,6 +437,12 @@ private: ShenandoahHeap* const _heap; ShenandoahRegionPartitions _partitions; + size_t _total_bytes_previously_allocated; + size_t _mutator_bytes_at_last_sample; + + // Temporarily holds mutator_Free allocatable bytes between prepare_to_rebuild() and finish_rebuild() + size_t _prepare_to_rebuild_mutator_free; + // This locks the rebuild process (in combination with the global heap lock). Whenever we rebuild the free set, // we first acquire the global heap lock and then we acquire this _rebuild_lock in a nested context. Threads that // need to check available, acquire only the _rebuild_lock to make sure that they are not obtaining the value of @@ -446,10 +452,10 @@ private: // locks will acquire them in the same order: first the global heap lock and then the rebuild lock. ShenandoahRebuildLock _rebuild_lock; - size_t _total_humongous_waste; - HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r); + size_t _total_humongous_waste; + // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero. Each time // we allocate an object, we decrement the count of this value. Each time we re-evaluate whether to allocate // from right-to-left or left-to-right, we reset the value of this counter to _InitialAllocBiasWeight. 
@@ -662,10 +668,47 @@ public: void increase_bytes_allocated(size_t bytes); + // Return an approximation of the bytes allocated since GC start. The value returned is monotonically non-decreasing + // in time within each GC cycle. For certain GC cycles, the value returned may include some bytes allocated before + // the start of the current GC cycle. inline size_t get_bytes_allocated_since_gc_start() const { return _mutator_bytes_allocated_since_gc_start; } + inline size_t get_total_bytes_allocated() { + return _mutator_bytes_allocated_since_gc_start + _total_bytes_previously_allocated; + } + + inline size_t get_bytes_allocated_since_previous_sample() { + size_t total_bytes = get_total_bytes_allocated(); + size_t result; + if (total_bytes < _mutator_bytes_at_last_sample) { + // This rare condition may occur if bytes allocated overflows (wraps around) size_t tally of allocations. + // This may also occur in the very rare situation that get_total_bytes_allocated() is queried in the middle of + // reset_bytes_allocated_since_gc_start(). Note that there is no lock to assure that the two global variables + // it modifies are modified atomically (_total_bytes_previously_allocated and _mutator_bytes_allocated_since_gc_start). + // This has been observed to occur when an out-of-cycle degenerated cycle is starting (and thus calls + // reset_bytes_allocated_since_gc_start()) at the same time that the control (non-generational mode) or + // regulator (generational-mode) thread calls should_start_gc() (which invokes get_bytes_allocated_since_previous_sample()). + // + // Handle this rare situation by responding with the "innocent" value 0 and resetting internal state so that the + next query can recalibrate. + result = 0; + } else { + // Note: there's always the possibility that the tally of total allocations exceeds the 64-bit capacity of our size_t + // counter. We assume that the difference between relevant samples does not exceed this count.
Example: + // Suppose _mutator_words_at_last_sample is 0xffff_ffff_ffff_fff0 (18,446,744,073,709,551,600 Decimal) + // and _total_words is 0x0000_0000_0000_0800 ( 32,768 Decimal) + // Then, total_words - _mutator_words_at_last_sample can be done adding 1's complement of subtrahend: + // 1's complement of _mutator_words_at_last_sample is: 0x0000_0000_0000_0010 ( 16 Decimal)) + // plus total_words: 0x0000_0000_0000_0800 (32,768 Decimal) + // sum: 0x0000_0000_0000_0810 (32,784 Decimal) + result = total_bytes - _mutator_bytes_at_last_sample; + } + _mutator_bytes_at_last_sample = total_bytes; + return result; + } + // Public because ShenandoahRegionPartitions assertions require access. inline size_t alloc_capacity(ShenandoahHeapRegion *r) const; inline size_t alloc_capacity(size_t idx) const; @@ -781,15 +824,15 @@ public: // Acquire heap lock and log status, assuming heap lock is not acquired by the caller. void log_status_under_lock(); - // Note that capacity is the number of regions that had available memory at most recent rebuild. It is not the - // entire size of the young or global generation. (Regions within the generation that were fully utilized at time of - // rebuild are not counted as part of capacity.) - - // All three of the following functions may produce stale data if called without owning the global heap lock. + // All four of the following functions may produce stale data if called without owning the global heap lock. // Changes to the values of these variables are performed with a lock. A change to capacity or used "atomically" // adjusts available with respect to lock holders. However, sequential calls to these three functions may produce // inconsistent data: available may not equal capacity - used because the intermediate states of any "atomic" // locked action can be seen by these unlocked functions. + + // Note that capacity is the number of regions that had available memory at most recent rebuild. 
It is not the + // entire size of the young or global generation. (Regions within the generation that were fully utilized at time of + // rebuild are not counted as part of capacity.) inline size_t capacity_holding_lock() const { shenandoah_assert_heaplocked(); return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); @@ -808,6 +851,7 @@ public: ShenandoahRebuildLocker locker(rebuild_lock()); return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); } + inline size_t reserved() const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector); } inline size_t available() { shenandoah_assert_not_heaplocked(); ShenandoahRebuildLocker locker(rebuild_lock()); @@ -819,6 +863,10 @@ public: return _partitions.available_in(ShenandoahFreeSetPartitionId::Mutator); } + inline size_t collector_available_locked() const { + return _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); + } + inline size_t total_humongous_waste() const { return _total_humongous_waste; } inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); @@ -880,13 +928,17 @@ public: // first_old_region is the index of the first region that is part of the OldCollector set // last_old_region is the index of the last region that is part of the OldCollector set // old_region_count is the number of regions in the OldCollector set that have memory available to be allocated - void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions, - size_t &first_old_region, size_t &last_old_region, size_t &old_region_count); + // + // Returns allocatable memory within Mutator partition, in words. 
+ size_t find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions, + size_t &first_old_region, size_t &last_old_region, size_t &old_region_count); // Ensure that Collector has at least to_reserve bytes of available memory, and OldCollector has at least old_reserve // bytes of available memory. On input, old_region_count holds the number of regions already present in the // OldCollector partition. Upon return, old_region_count holds the updated number of regions in the OldCollector partition. - void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count, + // + // Returns allocatable memory within Mutator partition, in words. + size_t reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count, size_t &young_used_regions, size_t &old_used_regions, size_t &young_used_bytes, size_t &old_used_bytes); // Reserve space for evacuations, with regions reserved for old evacuations placed to the right diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 3c92750cc0c..21b1fd9e0a8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -252,6 +252,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { phase5_epilog(); } + heap->start_idle_span(); // Resize metaspace MetaspaceGC::compute_new_size(); @@ -877,8 +878,11 @@ public: Copy::aligned_conjoint_words(compact_from, compact_to, size); oop new_obj = cast_to_oop(compact_to); - ContinuationGCSupport::relativize_stack_chunk(new_obj); + // Restore the mark word before relativizing the stack chunk. The copy's + // mark word contains the full GC forwarding encoding, which would cause + // is_stackChunk() to read garbage (especially with compact headers). 
new_obj->init_mark(); + ContinuationGCSupport::relativize_stack_chunk(new_obj); } } }; @@ -1124,8 +1128,9 @@ void ShenandoahFullGC::phase5_epilog() { if (heap->mode()->is_generational()) { ShenandoahGenerationalFullGC::compute_balances(); } - free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old); + heap->free_set()->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old); } + // Set mark incomplete because the marking bitmaps have been reset except pinned regions. _generation->set_mark_incomplete(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index ddb50ee0020..5b26ee67653 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -151,6 +151,10 @@ ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode return _heuristics; } +void ShenandoahGeneration::post_initialize_heuristics() { + _heuristics->post_initialize(); +} + void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { shenandoah_assert_heaplocked(); _evacuation_reserve = new_val; @@ -268,7 +272,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { } // Tally the census counts and compute the adaptive tenuring threshold - if (is_generational && ShenandoahGenerationalAdaptiveTenuring) { + if (is_generational) { // Objects above TAMS weren't included in the age census. Since they were all // allocated in this cycle they belong in the age 0 cohort. We walk over all // young regions and sum the volume of objects between TAMS and top. 
@@ -297,6 +301,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { collection_set->clear(); ShenandoahHeapLocker locker(heap->lock()); + heap->assert_pinned_region_status(this); _heuristics->choose_collection_set(collection_set); } @@ -305,16 +310,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); ShenandoahHeapLocker locker(heap->lock()); - - // We are preparing for evacuation. + // At start of evacuation, we do NOT compute_old_generation_balance() size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old; _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old); - if (heap->mode()->is_generational()) { - ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); - size_t allocation_runway = - gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions); - gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions); - } _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old); } } @@ -358,8 +356,7 @@ void ShenandoahGeneration::cancel_marking() { set_concurrent_mark_in_progress(false); } -ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, - uint max_workers) : +ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, uint max_workers) : _type(type), _task_queues(new ShenandoahObjToScanQueueSet(max_workers)), _ref_processor(new ShenandoahReferenceProcessor(this, MAX2(max_workers, 1U))), @@ -420,12 +417,6 @@ size_t ShenandoahGeneration::available() const { return result; } -// For ShenandoahYoungGeneration, Include the young available that may have been reserved for the Collector.
-size_t ShenandoahGeneration::available_with_reserve() const { - size_t result = available(max_capacity()); - return result; -} - size_t ShenandoahGeneration::soft_mutator_available() const { size_t result = available(ShenandoahHeap::heap()->soft_max_capacity() * (100.0 - ShenandoahEvacReserve) / 100); return result; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 946f2b91520..9f8944127c0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -63,7 +63,7 @@ private: // Return available assuming that we can allocate no more than capacity bytes within this generation. size_t available(size_t capacity) const; - public: +public: ShenandoahGeneration(ShenandoahGenerationType type, uint max_workers); ~ShenandoahGeneration(); @@ -83,10 +83,10 @@ private: ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; } virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode); + virtual void post_initialize_heuristics(); virtual void post_initialize(ShenandoahHeap* heap); - virtual size_t bytes_allocated_since_gc_start() const override = 0; virtual size_t used() const override = 0; virtual size_t used_regions() const = 0; virtual size_t used_regions_size() const = 0; @@ -96,7 +96,6 @@ private: virtual size_t max_capacity() const override = 0; size_t available() const override; - size_t available_with_reserve() const; // Returns the memory available based on the _soft_ max heap capacity (soft_max_heap - used). // The soft max heap size may be adjusted lower than the max heap size to cause the trigger @@ -144,7 +143,7 @@ private: virtual bool contains(ShenandoahAffiliation affiliation) const = 0; // Return true if this region is affiliated with this generation. 
- virtual bool contains(ShenandoahHeapRegion* region) const = 0; + virtual bool contains(ShenandoahHeapRegion* region) const override = 0; // Return true if this object is affiliated with this generation. virtual bool contains(oop obj) const = 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp index 3b57190cc75..bbad82de1dc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp @@ -24,6 +24,7 @@ * */ +#include "gc/shenandoah/shenandoahAgeCensus.hpp" #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentGC.hpp" @@ -55,7 +56,7 @@ ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() : _heap(ShenandoahGenerationalHeap::heap()), _age_period(0) { shenandoah_assert_generational(); - set_name("Shenandoah Control Thread"); + set_name("ShenControl"); create_and_start(); } @@ -120,10 +121,11 @@ void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest& assert(request.generation != nullptr, "Must know which generation to use for degenerated cycle"); } } else { - if (request.cause == GCCause::_shenandoah_concurrent_gc) { - // This is a regulator request. It is also possible that the regulator "canceled" an old mark, - // so we can clear that here. This clear operation will only clear the cancellation if it is - // a regulator request. + if (request.cause == GCCause::_shenandoah_concurrent_gc || ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) { + // This is a regulator request or an explicit gc request. Note that an explicit gc request is allowed to + // "upgrade" a regulator request. 
It is possible that the regulator "canceled" an old mark, so we must + // clear that cancellation here or the explicit gc cycle will erroneously detect it as a cancellation. + // This clear operation will only clear the cancellation if it was set by regulator request. _heap->clear_cancellation(GCCause::_shenandoah_concurrent_gc); } request.generation = _requested_generation; @@ -186,7 +188,7 @@ ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread: global_heuristics->record_requested_gc(); if (ShenandoahCollectorPolicy::should_run_full_gc(request.cause)) { - return stw_full;; + return stw_full; } else { // Unload and clean up everything. Note that this is an _explicit_ request and so does not use // the same `should_unload_classes` call as the regulator's concurrent gc request. @@ -254,7 +256,8 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest GCIdMark gc_id_mark; - if (gc_mode() != servicing_old) { + if ((gc_mode() != servicing_old) && (gc_mode() != stw_degenerated)) { + // If mode is stw_degenerated, count bytes allocated from the start of the conc GC that experienced alloc failure. _heap->reset_bytes_allocated_since_gc_start(); } @@ -271,6 +274,12 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest // Cannot uncommit bitmap slices during concurrent reset ShenandoahNoUncommitMark forbid_region_uncommit(_heap); + // When a whitebox full GC is requested, set the tenuring threshold to zero + // so that all young objects are promoted to old. This ensures that tests + // using WB.fullGC() to promote objects to old gen will not loop forever. 
+ ShenandoahTenuringOverride tenuring_override(request.cause == GCCause::_wb_full_gc, + _heap->age_census()); + _heap->print_before_gc(); switch (gc_mode()) { case concurrent_normal: { @@ -413,12 +422,11 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const She } // Coalescing threads completed and nothing was cancelled. it is safe to transition from this state. - old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + old_generation->transition_to(ShenandoahOldGeneration::IDLE); return; } - case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP: - old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING); - case ShenandoahOldGeneration::BOOTSTRAPPING: { + case ShenandoahOldGeneration::IDLE: + old_generation->transition_to(ShenandoahOldGeneration::MARKING); // Configure the young generation's concurrent mark to put objects in // old regions into the concurrent mark queues associated with the old // generation. The young cycle will run as normal except that rather than @@ -441,8 +449,6 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const She // and init mark for the concurrent mark. All of that work will have been // done by the bootstrapping young cycle. 
set_gc_mode(servicing_old); - old_generation->transition_to(ShenandoahOldGeneration::MARKING); - } case ShenandoahOldGeneration::MARKING: { ShenandoahGCSession session(request.cause, old_generation); bool marking_complete = resume_concurrent_old_cycle(old_generation, request.cause); @@ -624,8 +630,8 @@ void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const Sh assert(_degen_point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); _heap->increment_total_collections(false); - ShenandoahGCSession session(request.cause, request.generation); - + ShenandoahGCSession session(request.cause, request.generation, true, + _degen_point == ShenandoahGC::ShenandoahDegenPoint::_degenerated_outside_cycle); ShenandoahDegenGC gc(_degen_point, request.generation); gc.collect(request.cause); _degen_point = ShenandoahGC::_degenerated_unset; @@ -634,12 +640,6 @@ void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const Sh if (request.generation->is_global()) { assert(_heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks"); assert(_heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks"); - } else { - assert(request.generation->is_young(), "Expected degenerated young cycle, if not global."); - ShenandoahOldGeneration* old = _heap->old_generation(); - if (old->is_bootstrapping()) { - old->transition_to(ShenandoahOldGeneration::MARKING); - } } } @@ -671,7 +671,7 @@ bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenera // Cancel the old GC and wait for the control thread to start servicing the new request. 
log_info(gc)("Preempting old generation mark to allow %s GC", generation->name()); while (gc_mode() == servicing_old) { - ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc); + _heap->cancel_gc(GCCause::_shenandoah_concurrent_gc); notify_control_thread(ml, GCCause::_shenandoah_concurrent_gc, generation); ml.wait(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 6912750378e..ca15c6db443 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -133,3 +133,4 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() { } } } + diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 2c2e5533c01..1694121b955 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -90,11 +90,23 @@ ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned"); } +void ShenandoahGenerationalHeap::initialize_generations() { + ShenandoahHeap::initialize_generations(); + _young_generation->post_initialize(this); + _old_generation->post_initialize(this); +} + void ShenandoahGenerationalHeap::post_initialize() { ShenandoahHeap::post_initialize(); _age_census = new ShenandoahAgeCensus(); } +void ShenandoahGenerationalHeap::post_initialize_heuristics() { + ShenandoahHeap::post_initialize_heuristics(); + _young_generation->post_initialize_heuristics(); + _old_generation->post_initialize_heuristics(); +} + void ShenandoahGenerationalHeap::print_init_logger() const { ShenandoahGenerationalInitLogger logger; logger.print_all(); @@ -110,12 
+122,6 @@ void ShenandoahGenerationalHeap::initialize_heuristics() { _old_generation->initialize_heuristics(mode()); } -void ShenandoahGenerationalHeap::post_initialize_heuristics() { - ShenandoahHeap::post_initialize_heuristics(); - _young_generation->post_initialize(this); - _old_generation->post_initialize(this); -} - void ShenandoahGenerationalHeap::initialize_serviceability() { assert(mode()->is_generational(), "Only for the generational mode"); _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this); @@ -152,6 +158,10 @@ void ShenandoahGenerationalHeap::stop() { regulator_thread()->stop(); } +void ShenandoahGenerationalHeap::start_idle_span() { + young_generation()->heuristics()->start_idle_span(); +} + bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const { if (is_idle()) { return false; @@ -259,27 +269,25 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint break; } case OLD_GENERATION: { - PLAB* plab = ShenandoahThreadLocalData::plab(thread); - if (plab != nullptr) { + ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + if (shenandoah_plab != nullptr) { has_plab = true; - copy = allocate_from_plab(thread, size, is_promotion); - if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) && - ShenandoahThreadLocalData::plab_retries_enabled(thread)) { + copy = shenandoah_plab->allocate(size, is_promotion); + if (copy == nullptr && size < shenandoah_plab->desired_size() && shenandoah_plab->retries_enabled()) { // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because // the requested object does not fit within the current plab but the plab still has an "abundance" of memory, // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations. 
// Shrinking the desired PLAB size may allow us to eke out a small PLAB while staying beneath evacuation reserve. - if (plab->words_remaining() < plab_min_size()) { - ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size()); - copy = allocate_from_plab(thread, size, is_promotion); - // If we still get nullptr, we'll try a shared allocation below. + if (shenandoah_plab->plab()->words_remaining() < plab_min_size()) { + shenandoah_plab->set_desired_size(plab_min_size()); + copy = shenandoah_plab->allocate(size, is_promotion); if (copy == nullptr) { - // If retry fails, don't continue to retry until we have success (probably in next GC pass) - ShenandoahThreadLocalData::disable_plab_retries(thread); + // If we still get nullptr, we'll try a shared allocation below. + // However, don't continue to retry until we have success (probably in next GC pass) + shenandoah_plab->disable_retries(); } } - // else, copy still equals nullptr. this causes shared allocation below, preserving this plab for future needs. } } break; @@ -338,26 +346,23 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint increase_object_age(copy_val, from_region_age + 1); } + // Relativize stack chunks before publishing the copy. After the forwarding CAS, + // mutators can see the copy and thaw it via the fast path if flags == 0. We must + // relativize derived pointers and set gc_mode before that happens. Skip if the + // copy's mark word is already a forwarding pointer (another thread won the race + // and overwrote the original's header before we copied it). + if (!ShenandoahForwarding::is_forwarded(copy_val)) { + ContinuationGCSupport::relativize_stack_chunk(copy_val); + } + // Try to install the new forwarding pointer. oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val); if (result == copy_val) { // Successfully evacuated. Our copy is now the public one! - - // This is necessary for virtual thread support. 
This uses the mark word without - // considering that it may now be a forwarding pointer (and could therefore crash). - // Secondarily, we do not want to spend cycles relativizing stack chunks for oops - // that lost the evacuation race (and will therefore not become visible). It is - // safe to do this on the public copy (this is also done during concurrent mark). - ContinuationGCSupport::relativize_stack_chunk(copy_val); - if (ShenandoahEvacTracking) { // Record that the evacuation succeeded evac_tracker()->end_evacuation(thread, size * HeapWordSize, FROM_GENERATION, TO_GENERATION); } - - if (TO_GENERATION == OLD_GENERATION) { - old_generation()->handle_evacuation(copy, size); - } } else { // Failed to evacuate. We need to deal with the object that is left behind. Since this // new allocation is certainly after TAMS, it will be considered live in the next cycle. @@ -374,9 +379,9 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint break; } case OLD_GENERATION: { - ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size); + ShenandoahThreadLocalData::shenandoah_plab(thread)->plab()->undo_allocation(copy, size); if (is_promotion) { - ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize); + ShenandoahThreadLocalData::shenandoah_plab(thread)->subtract_from_promoted(size * HeapWordSize); } break; } @@ -401,184 +406,14 @@ template oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint from_region_age); template oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint from_region_age); -inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) { - assert(UseTLAB, "TLABs should be enabled"); - - PLAB* plab = ShenandoahThreadLocalData::plab(thread); - HeapWord* obj; - - if (plab == nullptr) { - assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", 
thread->name()); - // No PLABs in this thread, fallback to shared allocation - return nullptr; - } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) { - return nullptr; - } - // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy - obj = plab->allocate(size); - if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) { - // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations - obj = allocate_from_plab_slow(thread, size, is_promotion); - } - // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation - if (obj == nullptr) { - return nullptr; - } - - if (is_promotion) { - ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize); - } - return obj; -} - -// Establish a new PLAB and allocate size HeapWords within it. -HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) { - assert(mode()->is_generational(), "PLABs only relevant to generational GC"); - - const size_t plab_min_size = this->plab_min_size(); - // PLABs are aligned to card boundaries to avoid synchronization with concurrent - // allocations in other PLABs. - const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size; - - // Figure out size of new PLAB, using value determined at last refill. - size_t cur_size = ShenandoahThreadLocalData::plab_size(thread); - if (cur_size == 0) { - cur_size = plab_min_size; - } - - // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size() - const size_t future_size = MIN2(cur_size * 2, plab_max_size()); - // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor - // are card multiples.) 
- assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu" - ", card_size: %u, cur_size: %zu, max: %zu", - future_size, CardTable::card_size_in_words(), cur_size, plab_max_size()); - - // Record new heuristic value even if we take any shortcut. This captures - // the case when moderately-sized objects always take a shortcut. At some point, - // heuristics should catch up with them. Note that the requested cur_size may - // not be honored, but we remember that this is the preferred size. - log_debug(gc, plab)("Set next PLAB refill size: %zu bytes", future_size * HeapWordSize); - ShenandoahThreadLocalData::set_plab_size(thread, future_size); - - if (cur_size < size) { - // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation. - // This avoids retiring perfectly good PLABs in order to represent a single large object allocation. - log_debug(gc, plab)("Current PLAB size (%zu) is too small for %zu", cur_size * HeapWordSize, size * HeapWordSize); - return nullptr; - } - - // Retire current PLAB, and allocate a new one. - PLAB* plab = ShenandoahThreadLocalData::plab(thread); - if (plab->words_remaining() < plab_min_size) { - // Retire current PLAB. This takes care of any PLAB book-keeping. - // retire_plab() registers the remnant filler object with the remembered set scanner without a lock. - // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere. - retire_plab(plab, thread); - - size_t actual_size = 0; - HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size); - if (plab_buf == nullptr) { - if (min_size == plab_min_size) { - // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us - // to fail faster on subsequent promotion attempts. 
- ShenandoahThreadLocalData::disable_plab_promotions(thread); - } - return nullptr; - } else { - ShenandoahThreadLocalData::enable_plab_retries(thread); - } - // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail. - if (ZeroTLAB) { - // ... and clear it. - Copy::zero_to_words(plab_buf, actual_size); - } else { - // ...and zap just allocated object. -#ifdef ASSERT - // Skip mangling the space corresponding to the object header to - // ensure that the returned space is not considered parsable by - // any concurrent GC thread. - size_t hdr_size = oopDesc::header_size(); - Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal); -#endif // ASSERT - } - assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design"); - plab->set_buf(plab_buf, actual_size); - if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) { - return nullptr; - } - return plab->allocate(size); - } else { - // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble - // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request - // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we - // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs. - return nullptr; - } -} - -HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) { - // Align requested sizes to card-sized multiples. Align down so that we don't violate max size of TLAB. 
- assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design"); - assert(word_size >= min_size, "Requested PLAB is too small"); - - ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size); - // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread - // if we are at risk of infringing on the old-gen evacuation budget. - HeapWord* res = allocate_memory(req); - if (res != nullptr) { - *actual_size = req.actual_size(); - } else { - *actual_size = 0; - } - assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design"); - return res; -} - -void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) { - // We don't enforce limits on plab evacuations. We let it consume all available old-gen memory in order to reduce - // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion - // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any - // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle. - - // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to - // promotions. Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions. - // 1. Some of the plab may have been dedicated to evacuations. - // 2. Some of the plab may have been abandoned due to waste (at the end of the plab). 
- size_t not_promoted = - ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread); - ShenandoahThreadLocalData::reset_plab_promoted(thread); - ShenandoahThreadLocalData::set_plab_actual_size(thread, 0); - if (not_promoted > 0) { - log_debug(gc, plab)("Retire PLAB, unexpend unpromoted: %zu", not_promoted * HeapWordSize); - old_generation()->unexpend_promoted(not_promoted); - } - const size_t original_waste = plab->waste(); - HeapWord* const top = plab->top(); - - // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable. - // It adds the size of this unused memory, in words, to plab->waste(). - plab->retire(); - if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) { - // If retiring the plab created a filler object, then we need to register it with our card scanner so it can - // safely walk the region backing the plab. - log_debug(gc, plab)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT, - (plab->waste() - original_waste) * HeapWordSize, p2i(top)); - // No lock is necessary because the PLAB memory is aligned on card boundaries. - old_generation()->card_scan()->register_object_without_lock(top); - } -} - -void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) { - Thread* thread = Thread::current(); - retire_plab(plab, thread); -} - +// Call this function at the end of a GC cycle in order to establish proper sizes of young and old reserves, +// setting the old-generation balance so that GC can perform the anticipated evacuations. +// // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to // mutator_xfer_limit, and any surplus is transferred to the young generation. mutator_xfer_limit is -// the maximum we're able to transfer from young to old. 
This is called at the end of GC, as we prepare -// for the idle span that precedes the next GC. +// the maximum we're able to transfer from young to old. The mutator_xfer_limit constrains the transfer +// of memory from young to old. It does not limit young reserves. void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit, size_t old_trashed_regions, size_t young_trashed_regions) { shenandoah_assert_heaplocked(); @@ -628,9 +463,19 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent), bound_on_old_reserve)); + assert(mutator_xfer_limit <= young_available, + "Cannot transfer (%zu) memory that is not available (%zu)", mutator_xfer_limit, young_available); + if (young_reserve > young_available) { young_reserve = young_available; } + // We allow young_reserve to exceed mutator_xfer_limit. Essentially, this means the GC is already behind the pace + // of mutator allocations, and we'll need to trigger the next GC as soon as possible. 
+ if (mutator_xfer_limit > young_reserve) { + mutator_xfer_limit -= young_reserve; + } else { + mutator_xfer_limit = 0; + } // Decide how much old space we should reserve for a mixed collection size_t proposed_reserve_for_mixed = 0; @@ -1150,9 +995,7 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { coalesce_and_fill_old_regions(false); } - log_info(gc, cset)("Degenerated cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu", - old_generation()->get_promoted_reserve(), old_generation()->get_promoted_expended(), - old_generation()->get_promotion_failed_count(), old_generation()->get_promotion_failed_words() * HeapWordSize); + old_generation()->maybe_log_promotion_failure_stats(false); } void ShenandoahGenerationalHeap::complete_concurrent_cycle() { @@ -1167,9 +1010,7 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { entry_global_coalesce_and_fill(); } - log_info(gc, cset)("Concurrent cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu", - old_generation()->get_promoted_reserve(), old_generation()->get_promoted_expended(), - old_generation()->get_promotion_failed_count(), old_generation()->get_promotion_failed_words() * HeapWordSize); + old_generation()->maybe_log_promotion_failure_stats(true); } void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index 719bae52a83..d6893dc011e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -40,6 +40,7 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { public: explicit ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy); void post_initialize() override; + void initialize_generations() override; void initialize_heuristics() 
override; void post_initialize_heuristics() override; @@ -82,6 +83,8 @@ public: inline bool is_tenurable(const ShenandoahHeapRegion* r) const; + void start_idle_span() override; + // Ages regions that haven't been used for allocations in the current cycle. // Resets ages for regions that have been used for allocations. void update_region_ages(ShenandoahMarkingContext* ctx); @@ -99,9 +102,6 @@ public: size_t plab_min_size() const { return _min_plab_size; } size_t plab_max_size() const { return _max_plab_size; } - void retire_plab(PLAB* plab); - void retire_plab(PLAB* plab, Thread* thread); - // ---------- Update References // // In the generational mode, we will use this function for young, mixed, and global collections. @@ -110,10 +110,6 @@ public: void final_update_refs_update_region_states() override; private: - HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion); - HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion); - HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size); - const size_t _min_plab_size; const size_t _max_plab_size; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index d78bdae6a51..4b01ea1cd52 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -261,7 +261,7 @@ jint ShenandoahHeap::initialize() { // // Worker threads must be initialized after the barrier is configured // - _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers); + _workers = new ShenandoahWorkerThreads("ShenWorker", _max_workers); if (_workers == nullptr) { vm_exit_during_initialization("Failed necessary allocation."); } else { @@ -435,7 +435,7 @@ jint ShenandoahHeap::initialize() { } _free_set = new ShenandoahFreeSet(this, _num_regions); - post_initialize_heuristics(); + initialize_generations(); // We are initializing free set. 
We ignore cset region tallies. size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old; @@ -492,16 +492,17 @@ jint ShenandoahHeap::initialize() { _phase_timings = new ShenandoahPhaseTimings(max_workers()); ShenandoahCodeRoots::initialize(); + // Initialization of controller makes use of variables established by initialize_heuristics. initialize_controller(); + // Certain initialization of heuristics must be deferred until after controller is initialized. + post_initialize_heuristics(); + start_idle_span(); if (ShenandoahUncommit) { _uncommit_thread = new ShenandoahUncommitThread(this); } - print_init_logger(); - FullGCForwarding::initialize(_heap_region); - return JNI_OK; } @@ -545,10 +546,6 @@ void ShenandoahHeap::initialize_heuristics() { _global_generation->initialize_heuristics(mode()); } -void ShenandoahHeap::post_initialize_heuristics() { - _global_generation->post_initialize(this); -} - #ifdef _MSC_VER #pragma warning( push ) #pragma warning( disable:4355 ) // 'this' : used in base member initializer list @@ -690,6 +687,11 @@ public: } }; +void ShenandoahHeap::initialize_generations() { + _global_generation->post_initialize(this); +} + +// We do not call this explicitly It is called by Hotspot infrastructure. 
void ShenandoahHeap::post_initialize() { CollectedHeap::post_initialize(); @@ -717,6 +719,10 @@ void ShenandoahHeap::post_initialize() { JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();) } +void ShenandoahHeap::post_initialize_heuristics() { + _global_generation->post_initialize_heuristics(); +} + ShenandoahHeuristics* ShenandoahHeap::heuristics() { return _global_generation->heuristics(); } @@ -760,6 +766,7 @@ void ShenandoahHeap::set_soft_max_capacity(size_t v) { "Should be in bounds: %zu <= %zu <= %zu", min_capacity(), v, max_capacity()); _soft_max_size.store_relaxed(v); + heuristics()->compute_headroom_adjustment(); } size_t ShenandoahHeap::min_capacity() const { @@ -835,6 +842,10 @@ void ShenandoahHeap::notify_heap_changed() { _heap_changed.try_set(); } +void ShenandoahHeap::start_idle_span() { + heuristics()->start_idle_span(); +} + void ShenandoahHeap::set_forced_counters_update(bool value) { monitoring_support()->set_forced_counters_update(value); } @@ -1171,20 +1182,20 @@ public: } if (ShenandoahHeap::heap()->mode()->is_generational()) { - PLAB* plab = ShenandoahThreadLocalData::plab(thread); - assert(plab != nullptr, "PLAB should be initialized for %s", thread->name()); + ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + assert(shenandoah_plab != nullptr, "PLAB should be initialized for %s", thread->name()); // There are two reasons to retire all plabs between old-gen evacuation passes. // 1. We need to make the plab memory parsable by remembered-set scanning. // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region - ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread); + shenandoah_plab->retire(); // Re-enable promotions for the next evacuation phase. - ShenandoahThreadLocalData::enable_plab_promotions(thread); + shenandoah_plab->enable_promotions(); // Reset the fill size for next evacuation phase. 
- if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) { - ShenandoahThreadLocalData::set_plab_size(thread, 0); + if (_resize && shenandoah_plab->desired_size() > 0) { + shenandoah_plab->set_desired_size(0); } } } @@ -1342,12 +1353,21 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg // Copy the object: Copy::aligned_disjoint_words(cast_from_oop(p), copy, size); - // Try to install the new forwarding pointer. oop copy_val = cast_to_oop(copy); + + // Relativize stack chunks before publishing the copy. After the forwarding CAS, + // mutators can see the copy and thaw it via the fast path if flags == 0. We must + // relativize derived pointers and set gc_mode before that happens. Skip if the + // copy's mark word is already a forwarding pointer (another thread won the race + // and overwrote the original's header before we copied it). + if (!ShenandoahForwarding::is_forwarded(copy_val)) { + ContinuationGCSupport::relativize_stack_chunk(copy_val); + } + + // Try to install the new forwarding pointer. oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val); if (result == copy_val) { // Successfully evacuated. Our copy is now the public one! 
- ContinuationGCSupport::relativize_stack_chunk(copy_val); shenandoah_assert_correct(nullptr, copy_val); if (ShenandoahEvacTracking) { evac_tracker()->end_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen); @@ -1454,9 +1474,9 @@ public: assert(gclab->words_remaining() == 0, "GCLAB should not need retirement"); if (ShenandoahHeap::heap()->mode()->is_generational()) { - PLAB* plab = ShenandoahThreadLocalData::plab(thread); - assert(plab != nullptr, "PLAB should be initialized for %s", thread->name()); - assert(plab->words_remaining() == 0, "PLAB should not need retirement"); + ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + assert(shenandoah_plab != nullptr, "PLAB should be initialized for %s", thread->name()); + assert(shenandoah_plab->plab()->words_remaining() == 0, "PLAB should not need retirement"); } } }; @@ -1630,7 +1650,8 @@ void ShenandoahHeap::set_active_generation(ShenandoahGeneration* generation) { _active_generation = generation; } -void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) { +void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation, + bool is_degenerated, bool is_out_of_cycle) { shenandoah_policy()->record_collection_cause(cause); const GCCause::Cause current = gc_cause(); @@ -1639,7 +1660,11 @@ void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* set_gc_cause(cause); - generation->heuristics()->record_cycle_start(); + if (is_degenerated) { + generation->heuristics()->record_degenerated_cycle_start(is_out_of_cycle); + } else { + generation->heuristics()->record_cycle_start(); + } } void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) { @@ -1935,6 +1960,26 @@ void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const } } +class ShenandoahHeapRegionIteratorTask : public WorkerTask { +private: + ShenandoahRegionIterator _regions; + 
ShenandoahHeapRegionClosure* _closure; + +public: + ShenandoahHeapRegionIteratorTask(ShenandoahHeapRegionClosure* closure) + : WorkerTask("Shenandoah Heap Region Iterator") + , _closure(closure) {} + + void work(uint worker_id) override { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahHeapRegion* region = _regions.next(); + while (region != nullptr) { + _closure->heap_region_do(region); + region = _regions.next(); + } + } +}; + class ShenandoahParallelHeapRegionTask : public WorkerTask { private: ShenandoahHeap* const _heap; @@ -1991,6 +2036,11 @@ void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* b } } +void ShenandoahHeap::heap_region_iterator(ShenandoahHeapRegionClosure* closure) const { + ShenandoahHeapRegionIteratorTask task(closure); + workers()->run_task(&task); +} + class ShenandoahRendezvousHandshakeClosure : public HandshakeClosure { public: inline ShenandoahRendezvousHandshakeClosure(const char* name) : HandshakeClosure(name) {} @@ -2689,10 +2739,7 @@ GrowableArray ShenandoahHeap::memory_pools() { } MemoryUsage ShenandoahHeap::memory_usage() { - assert(_initial_size <= ShenandoahHeap::heap()->max_capacity(), "sanity"); - assert(used() <= ShenandoahHeap::heap()->max_capacity(), "sanity"); - assert(committed() <= ShenandoahHeap::heap()->max_capacity(), "sanity"); - return MemoryUsage(_initial_size, used(), committed(), max_capacity()); + return shenandoah_memory_usage(_initial_size, used(), committed(), max_capacity()); } ShenandoahRegionIterator::ShenandoahRegionIterator() : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 85ad339469d..bed26a093d0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -195,6 +195,7 @@ public: ShenandoahHeap(ShenandoahCollectorPolicy* policy); jint initialize() override; void post_initialize() override; + virtual void 
initialize_generations(); void initialize_mode(); virtual void initialize_heuristics(); virtual void post_initialize_heuristics(); @@ -297,6 +298,7 @@ public: void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; + void heap_region_iterator(ShenandoahHeapRegionClosure* blk) const; inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; }; @@ -393,6 +395,8 @@ public: return _heap_changed.try_unset(); } + virtual void start_idle_span(); + void set_concurrent_young_mark_in_progress(bool in_progress); void set_concurrent_old_mark_in_progress(bool in_progress); void set_evacuation_in_progress(bool in_progress); @@ -560,7 +564,7 @@ public: return _evac_tracker; } - void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation); + void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation, bool is_degenerated, bool is_out_of_cycle); void on_cycle_end(ShenandoahGeneration* generation); ShenandoahVerifier* verifier(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index 02f2beaf4e0..6d77cccaa6a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -344,6 +344,8 @@ uint ShenandoahHeap::get_object_age(oop obj) { } if (w.has_monitor()) { w = w.monitor()->header(); + } else { + assert(!w.has_displaced_mark_helper(), "Mark word should not be displaced"); } assert(w.age() <= markWord::max_age, "Impossible!"); return w.age(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index afc6b24e168..c031569b7c6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -67,6 +67,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, 
size_t index, bool c _new_top(nullptr), _empty_time(os::elapsedTime()), _top_before_promoted(nullptr), + _top_at_evac_start(start), _state(committed ? _empty_committed : _empty_uncommitted), _top(start), _tlab_allocs(0), @@ -565,25 +566,34 @@ void ShenandoahHeapRegion::recycle_internal() { assert(_recycling.is_set() && is_trash(), "Wrong state"); ShenandoahHeap* heap = ShenandoahHeap::heap(); + _top_at_evac_start = _bottom; _mixed_candidate_garbage_words = 0; - set_top(bottom()); clear_live_data(); reset_alloc_metadata(); heap->marking_context()->reset_top_at_mark_start(this); set_update_watermark(bottom()); + if (is_old()) { + heap->old_generation()->clear_cards_for(this); + } + if (ZapUnusedHeapArea) { SpaceMangler::mangle_region(MemRegion(bottom(), end())); } - - make_empty(); + set_top(bottom()); set_affiliation(FREE); + + // Lastly, set region state to empty + make_empty(); } // Upon return, this region has been recycled. We try to recycle it. // We may fail if some other thread recycled it before we do. void ShenandoahHeapRegion::try_recycle_under_lock() { shenandoah_assert_heaplocked(); - if (is_trash() && _recycling.try_set()) { + if (!is_trash()) { + return; + } + if (_recycling.try_set()) { if (is_trash()) { // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as // part of capacity, as empty, as fully available, and as unaffiliated. This provides short-lived optimism @@ -603,6 +613,7 @@ void ShenandoahHeapRegion::try_recycle_under_lock() { os::naked_yield(); } } + assert(!is_trash(), "Must not"); } } @@ -610,7 +621,10 @@ void ShenandoahHeapRegion::try_recycle_under_lock() { // some GC worker thread has taken responsibility to recycle the region, eventually. 
void ShenandoahHeapRegion::try_recycle() { shenandoah_assert_not_heaplocked(); - if (is_trash() && _recycling.try_set()) { + if (!is_trash()) { + return; + } + if (_recycling.try_set()) { // Double check region state after win the race to set recycling flag if (is_trash()) { // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as @@ -834,7 +848,7 @@ void ShenandoahHeapRegion::set_state(RegionState to) { evt.set_to(to); evt.commit(); } - _state.store_relaxed(to); + _state.release_store(to); } void ShenandoahHeapRegion::record_pin() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index 3a0ac042f57..e27bbbb737d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -218,7 +218,7 @@ public: bool is_alloc_allowed() const { auto cur_state = state(); return is_empty_state(cur_state) || cur_state == _regular || cur_state == _pinned; } bool is_stw_move_allowed() const { auto cur_state = state(); return cur_state == _regular || cur_state == _cset || (ShenandoahHumongousMoves && cur_state == _humongous_start); } - RegionState state() const { return _state.load_relaxed(); } + RegionState state() const { return _state.load_acquire(); } int state_ordinal() const { return region_state_to_ordinal(state()); } void record_pin(); @@ -246,6 +246,7 @@ private: double _empty_time; HeapWord* _top_before_promoted; + HeapWord* _top_at_evac_start; // Seldom updated fields Atomic _state; @@ -365,12 +366,15 @@ public: } // Returns true iff this region was promoted in place subsequent to the most recent start of concurrent old marking. 
- inline bool was_promoted_in_place() { + bool was_promoted_in_place() const { return _promoted_in_place; } inline void restore_top_before_promote(); inline size_t garbage_before_padded_for_promote() const; + HeapWord* get_top_at_evac_start() const { return _top_at_evac_start; } + void record_top_at_evac_start() { _top_at_evac_start = _top; } + // If next available memory is not aligned on address that is multiple of alignment, fill the empty space // so that returned object is aligned on an address that is a multiple of alignment_in_bytes. Requested // size is in words. It is assumed that this->is_old(). A pad object is allocated, filled, and registered diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp index 3c6fe1a3df1..7554a9c9a2c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp @@ -80,6 +80,11 @@ void ShenandoahFinalMarkUpdateRegionStateClosure::heap_region_do(ShenandoahHeapR // Remember limit for updating refs. It's guaranteed that we get no // from-space-refs written from here on. 
r->set_update_watermark_at_safepoint(r->top()); + + if (r->is_old()) { + // Record where we need to start updating the remembered set + r->record_top_at_evac_start(); + } } else { assert(!r->has_live(), "Region %zu should have no live data", r->index()); assert(_ctx == nullptr || _ctx->top_at_mark_start(r) == r->top(), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp index aed3faef906..aaf152e2890 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp @@ -32,7 +32,6 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomicAccess.hpp" #include "runtime/perfData.inline.hpp" #include "utilities/defaultStream.hpp" @@ -79,6 +78,7 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() { if (_name_space != nullptr) FREE_C_HEAP_ARRAY(char, _name_space); + if (_regions_data != nullptr) FREE_C_HEAP_ARRAY(PerfVariable*, _regions_data); } void ShenandoahHeapRegionCounters::write_snapshot(PerfLongVariable** regions, @@ -106,8 +106,8 @@ void ShenandoahHeapRegionCounters::write_snapshot(PerfLongVariable** regions, void ShenandoahHeapRegionCounters::update() { if (ShenandoahRegionSampling) { jlong current = nanos_to_millis(os::javaTimeNanos()); - jlong last = _last_sample_millis; - if (current - last > ShenandoahRegionSamplingRate && AtomicAccess::cmpxchg(&_last_sample_millis, last, current) == last) { + jlong last = _last_sample_millis.load_relaxed(); + if (current - last > ShenandoahRegionSamplingRate && _last_sample_millis.compare_exchange(last, current) == last) { ShenandoahHeap* heap = ShenandoahHeap::heap(); _status->set_value(encode_heap_status(heap)); diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp index 508b40e49a8..d50188bf70c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp @@ -28,6 +28,7 @@ #include "logging/logFileStreamOutput.hpp" #include "memory/allocation.hpp" +#include "runtime/atomic.hpp" /** * This provides the following in JVMStat: @@ -88,7 +89,7 @@ private: PerfLongVariable** _regions_data; PerfLongVariable* _timestamp; PerfLongVariable* _status; - volatile jlong _last_sample_millis; + Atomic _last_sample_millis; void write_snapshot(PerfLongVariable** regions, PerfLongVariable* ts, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp index 83f4217df83..153193fa3a3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp @@ -26,38 +26,11 @@ #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" -#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahInPlacePromoter.hpp" #include "gc/shenandoah/shenandoahMarkingContext.hpp" #include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahYoungGeneration.hpp" -ShenandoahInPlacePromotionPlanner::RegionPromotions::RegionPromotions(ShenandoahFreeSet* free_set) - : _low_idx(free_set->max_regions()) - , _high_idx(-1) - , _regions(0) - , _bytes(0) - , _free_set(free_set) -{ -} - -void ShenandoahInPlacePromotionPlanner::RegionPromotions::increment(idx_t region_index, size_t remnant_bytes) { - if (region_index < _low_idx) { - _low_idx = region_index; - } - if (region_index > _high_idx) { - _high_idx = region_index; - } - _regions++; - _bytes += 
remnant_bytes; -} - -void ShenandoahInPlacePromotionPlanner::RegionPromotions::update_free_set(ShenandoahFreeSetPartitionId partition_id) const { - if (_regions > 0) { - _free_set->shrink_interval_if_range_modifies_either_boundary(partition_id, _low_idx, _high_idx, _regions); - } -} - ShenandoahInPlacePromotionPlanner::ShenandoahInPlacePromotionPlanner(const ShenandoahGenerationalHeap* heap) : _old_garbage_threshold(ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold() / 100) , _pip_used_threshold(ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage / 100) @@ -75,6 +48,7 @@ bool ShenandoahInPlacePromotionPlanner::is_eligible(const ShenandoahHeapRegion* } void ShenandoahInPlacePromotionPlanner::prepare(ShenandoahHeapRegion* r) { + assert(!r->is_humongous_continuation(), "Should not call for humongous continuations"); HeapWord* tams = _marking_context->top_at_mark_start(r); HeapWord* original_top = r->top(); @@ -86,6 +60,20 @@ void ShenandoahInPlacePromotionPlanner::prepare(ShenandoahHeapRegion* r) { return; } + if (r->is_humongous_start()) { + if (const oop obj = cast_to_oop(r->bottom()); !obj->is_typeArray()) { + // Nothing else to do for humongous, we just update the stats and move on. The humongous regions + // themselves will be discovered and promoted by gc workers during evacuation. Note that humongous + // primitive arrays are not promoted. + const size_t num_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize); + for (size_t i = r->index(); i < r->index() + num_regions; i++) { + _pip_humongous_stats.update(_heap->get_region(i)); + } + } + return; + } + + _pip_regular_stats.update(r); // No allocations from this region have been made during concurrent mark. It meets all the criteria // for in-place-promotion. 
Though we only need the value of top when we fill the end of the region, // we use this field to indicate that this region should be promoted in place during the evacuation @@ -128,8 +116,14 @@ void ShenandoahInPlacePromotionPlanner::prepare(ShenandoahHeapRegion* r) { } } -void ShenandoahInPlacePromotionPlanner::update_free_set() const { +void ShenandoahInPlacePromotionPlanner::complete_planning() const { _heap->old_generation()->set_pad_for_promote_in_place(_pip_padding_bytes); + _heap->old_generation()->set_expected_humongous_region_promotions(_pip_humongous_stats.count); + _heap->old_generation()->set_expected_regular_region_promotions(_pip_regular_stats.count); + log_info(gc, ergo)("Planning to promote in place %zu humongous regions and %zu" + " regular regions, spanning a total of %zu used bytes", + _pip_humongous_stats.count, _pip_regular_stats.count, + _pip_humongous_stats.usage + _pip_regular_stats.usage); if (_mutator_regions._regions + _collector_regions._regions > 0) { _free_set->account_for_pip_regions(_mutator_regions._regions, _mutator_regions._bytes, @@ -244,6 +238,9 @@ void ShenandoahInPlacePromoter::promote(ShenandoahHeapRegion* region) const { // is_collector_free range. We'll add it to that range below. region->restore_top_before_promote(); + // We also need to record where those allocations begin so that we can later update the remembered set. + region->record_top_at_evac_start(); + assert(region->used() + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant"); // The update_watermark was likely established while we had the artificially high value of top. Make it sane now. 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp index 939107dd3ac..d2cb644a59e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp @@ -25,16 +25,21 @@ #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP #define SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahSimpleBitMap.hpp" -class ShenandoahFreeSet; class ShenandoahMarkingContext; class ShenandoahGenerationalHeap; -class ShenandoahHeapRegion; +// This class is responsible for identifying regions that can be +// promoted in place. It also prepares these regions by preventing +// them from being used for allocations. Finally, it notifies the +// freeset which regions are to be promoted in place. class ShenandoahInPlacePromotionPlanner { using idx_t = ShenandoahSimpleBitMap::idx_t; + // Used to inform free set of regions being promoted struct RegionPromotions { idx_t _low_idx; idx_t _high_idx; @@ -42,9 +47,47 @@ class ShenandoahInPlacePromotionPlanner { size_t _bytes; ShenandoahFreeSet* _free_set; - explicit RegionPromotions(ShenandoahFreeSet* free_set); - void increment(idx_t region_index, size_t remnant_bytes); - void update_free_set(ShenandoahFreeSetPartitionId partition_id) const; + explicit RegionPromotions(ShenandoahFreeSet* free_set) + : _low_idx(free_set->max_regions()) + , _high_idx(-1) + , _regions(0) + , _bytes(0) + , _free_set(free_set) + { + } + + void increment(idx_t region_index, size_t remnant_bytes) { + if (region_index < _low_idx) { + _low_idx = region_index; + } + if (region_index > _high_idx) { + _high_idx = region_index; + } + _regions++; + _bytes += remnant_bytes; + } + + void update_free_set(ShenandoahFreeSetPartitionId partition_id) const { + if (_regions > 0) { + 
_free_set->shrink_interval_if_range_modifies_either_boundary(partition_id, _low_idx, _high_idx, _regions); + } + } + }; + + // Used to track metrics about the regions being promoted in place + struct RegionPromotionStats { + size_t count; + size_t usage; + size_t free; + size_t garbage; + + RegionPromotionStats() : count(0), usage(0), free(0), garbage(0) {} + void update(ShenandoahHeapRegion* region) { + count++; + usage += region->used(); + free += region->free(); + garbage += region->garbage(); + } }; const size_t _old_garbage_threshold; @@ -60,6 +103,11 @@ class ShenandoahInPlacePromotionPlanner { // Tracks the padding of space above top in regions eligible for promotion in place size_t _pip_padding_bytes; + + // Tracks stats for in place promotions + RegionPromotionStats _pip_regular_stats; + RegionPromotionStats _pip_humongous_stats; + public: explicit ShenandoahInPlacePromotionPlanner(const ShenandoahGenerationalHeap* heap); @@ -69,12 +117,17 @@ public: // Prepares the region for promotion by moving top to the end to prevent allocations void prepare(ShenandoahHeapRegion* region); - // Notifies the free set of in place promotions - void update_free_set() const; + // Notifies the free set and old generation of in place promotions + void complete_planning() const; + + const RegionPromotionStats& regular_region_stats() const { return _pip_regular_stats; } + const RegionPromotionStats& humongous_region_stats() const { return _pip_humongous_stats; } size_t old_garbage_threshold() const { return _old_garbage_threshold; } }; +// For regions that have been selected and prepared for promotion, this class +// will perform the actual promotion. 
class ShenandoahInPlacePromoter { ShenandoahGenerationalHeap* _heap; public: @@ -89,3 +142,4 @@ private: }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP + diff --git a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp index 7c91df191e5..5041419b2c7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp @@ -38,15 +38,19 @@ private: shenandoah_padding(0); Atomic _state; shenandoah_padding(1); +#ifdef ASSERT Atomic _owner; shenandoah_padding(2); +#endif template void contended_lock_internal(JavaThread* java_thread); static void yield_or_sleep(int &yields); public: - ShenandoahLock() : _state(unlocked), _owner(nullptr) {}; + ShenandoahLock() : _state(unlocked) { + DEBUG_ONLY(_owner.store_relaxed(nullptr);) + }; void lock(bool allow_block_for_safepoint = false) { assert(_owner.load_relaxed() != Thread::current(), "reentrant locking attempt, would deadlock"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp index ba24e890769..0d42b95164b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp @@ -77,10 +77,9 @@ void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveD if (task->is_not_chunked()) { if (obj->is_instance()) { // Case 1: Normal oop, process as usual. - if (ContinuationGCSupport::relativize_stack_chunk(obj)) { - // Loom doesn't support mixing of weak marking and strong marking of - // stack chunks. - cl->set_weak(false); + if (obj->is_stackChunk()) { + // Loom doesn't support mixing of weak marking and strong marking of stack chunks. 
+ cl->set_weak(false); } obj->oop_iterate(cl); @@ -118,13 +117,11 @@ inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop ob // Age census for objects in the young generation if (GENERATION == YOUNG || (GENERATION == GLOBAL && region->is_young())) { assert(heap->mode()->is_generational(), "Only if generational"); - if (ShenandoahGenerationalAdaptiveTenuring) { - assert(region->is_young(), "Only for young objects"); - uint age = ShenandoahHeap::get_object_age(obj); - ShenandoahAgeCensus* const census = ShenandoahGenerationalHeap::heap()->age_census(); - CENSUS_NOISE(census->add(age, region->age(), region->youth(), size, worker_id);) - NO_CENSUS_NOISE(census->add(age, region->age(), size, worker_id);) - } + assert(region->is_young(), "Only for young objects"); + const uint age = ShenandoahHeap::get_object_age(obj); + ShenandoahAgeCensus* const census = ShenandoahGenerationalHeap::heap()->age_census(); + CENSUS_NOISE(census->add(age, region->age(), region->youth(), size, worker_id);) + NO_CENSUS_NOISE(census->add(age, region->age(), size, worker_id);) } if (!region->is_humongous_start()) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp index d55d1bd8147..dfc34dcc3c3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp @@ -48,25 +48,7 @@ ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap, MemoryUsage ShenandoahMemoryPool::get_memory_usage() { - size_t initial = initial_size(); - size_t max = max_size(); - size_t used = used_in_bytes(); - size_t committed = _heap->committed(); - - // These asserts can never fail: max is stable, and all updates to other values never overflow max. 
- assert(initial <= max, "initial: %zu, max: %zu", initial, max); - assert(used <= max, "used: %zu, max: %zu", used, max); - assert(committed <= max, "committed: %zu, max: %zu", committed, max); - - // Committed and used are updated concurrently and independently. They can momentarily break - // the assert below, which would also fail in downstream code. To avoid that, adjust values - // to make sense under the race. See JDK-8207200. - committed = MAX2(used, committed); - assert(used <= committed, "used: %zu, committed: %zu", used, committed); - assert(initial <= _heap->max_capacity(), "sanity"); - assert(committed <= _heap->max_capacity(), "sanity"); - assert(max <= _heap->max_capacity(), "sanity"); - return MemoryUsage(initial, used, committed, max); + return shenandoah_memory_usage(initial_size(), used_in_bytes(), _heap->committed(), max_size()); } size_t ShenandoahMemoryPool::used_in_bytes() { @@ -83,16 +65,7 @@ ShenandoahGenerationalMemoryPool::ShenandoahGenerationalMemoryPool(ShenandoahHea _generation(generation) { } MemoryUsage ShenandoahGenerationalMemoryPool::get_memory_usage() { - size_t initial = initial_size(); - size_t max = max_size(); - size_t used = used_in_bytes(); - size_t committed = _generation->used_regions_size(); - - assert(initial <= _heap->max_capacity(), "sanity"); - assert(used <= _heap->max_capacity(), "sanity"); - assert(committed <= _heap->max_capacity(), "sanity"); - assert(max <= _heap->max_capacity(), "sanity"); - return MemoryUsage(initial, used, committed, max); + return shenandoah_memory_usage(initial_size(), used_in_bytes(), _generation->used_regions_size(), max_size()); } size_t ShenandoahGenerationalMemoryPool::used_in_bytes() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp index ccdfdddede9..d466087b9b7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp @@ -30,6 
+30,17 @@ #include "services/memoryPool.hpp" #include "services/memoryUsage.hpp" +// Constructs a MemoryUsage from concurrently sampled values, clamping committed +// to be at least as large as used to account for concurrent updates. See JDK-8207200. +inline MemoryUsage shenandoah_memory_usage(size_t initial, size_t used, size_t committed, size_t max) { + assert(initial <= max, "initial: %zu, max: %zu", initial, max); + assert(used <= max, "used: %zu, max: %zu", used, max); + assert(committed <= max, "committed: %zu, max: %zu", committed, max); + committed = MAX2(used, committed); + assert(used <= committed, "used: %zu, committed: %zu", used, committed); + return MemoryUsage(initial, used, committed, max); +} + class ShenandoahMemoryPool : public CollectedMemoryPool { protected: ShenandoahHeap* _heap; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp index 32c63e9b186..1ddd8e1c032 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp @@ -198,11 +198,11 @@ void BinaryMagnitudeSeq::clear() { for (int c = 0; c < BitsPerSize_t; c++) { _mags[c] = 0; } - _sum = 0; + _sum.store_relaxed(0); } void BinaryMagnitudeSeq::add(size_t val) { - AtomicAccess::add(&_sum, val); + _sum.add_then_fetch(val); int mag = log2i_graceful(val) + 1; @@ -237,7 +237,7 @@ size_t BinaryMagnitudeSeq::num() const { } size_t BinaryMagnitudeSeq::sum() const { - return _sum; + return _sum.load_relaxed(); } int BinaryMagnitudeSeq::min_level() const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp index 68f3cfba97a..1a14f930174 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP #define 
SHARE_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP +#include "runtime/atomic.hpp" #include "utilities/numberSeq.hpp" // HDR sequence stores the low-resolution high-dynamic-range values. @@ -59,7 +60,7 @@ public: // is not needed, it is preferred over HdrSeq. class BinaryMagnitudeSeq : public CHeapObj { private: - size_t _sum; + Atomic _sum; size_t* _mags; public: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 4fda65b4030..37de5966554 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -38,6 +38,7 @@ #include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" #include "gc/shenandoah/shenandoahYoungGeneration.hpp" @@ -109,13 +110,11 @@ ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues) _promoted_expended(0), _promotion_potential(0), _pad_for_promote_in_place(0), - _promotion_failure_count(0), - _promotion_failure_words(0), _promotable_humongous_regions(0), _promotable_regular_regions(0), _is_parsable(true), _card_scan(nullptr), - _state(WAITING_FOR_BOOTSTRAP), + _state(IDLE), _growth_percent_before_collection(INITIAL_GROWTH_PERCENT_BEFORE_COLLECTION) { assert(type() == ShenandoahGenerationType::OLD, "OO sanity"); @@ -148,8 +147,50 @@ void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) { void ShenandoahOldGeneration::reset_promoted_expended() { shenandoah_assert_heaplocked_or_safepoint(); _promoted_expended.store_relaxed(0); - _promotion_failure_count.store_relaxed(0); - _promotion_failure_words.store_relaxed(0); +} + +void ShenandoahOldGeneration::maybe_log_promotion_failure_stats(bool concurrent) 
const { + LogTarget(Info, gc, plab) plab_info; + if (plab_info.is_enabled()) { + size_t failed_count = 0; + size_t failed_words = 0; + + class AggregatePromotionFailuresClosure : public ThreadClosure { + private: + size_t _total_count; + size_t _total_words; + public: + AggregatePromotionFailuresClosure() : _total_count(0), _total_words(0) {} + + void do_thread(Thread* thread) override { + ShenandoahPLAB* plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + if (plab != nullptr) { + _total_count += plab->get_promotion_failure_count(); + _total_words += plab->get_promotion_failure_words(); + plab->reset_promotion_failures(); + } + } + + size_t total_count() const { return _total_count; } + size_t total_words() const { return _total_words; } + }; + + AggregatePromotionFailuresClosure cl; + if (concurrent) { + MutexLocker lock(Threads_lock); + Threads::threads_do(&cl); + } else { + Threads::threads_do(&cl); + } + + failed_count = cl.total_count(); + failed_words = cl.total_words(); + + LogStream ls(plab_info); + ls.print_cr("Cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu", + get_promoted_reserve(), get_promoted_expended(), + failed_count, failed_words * HeapWordSize); + } } size_t ShenandoahOldGeneration::expend_promoted(size_t increment) { @@ -199,7 +240,8 @@ ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAlloc // We've created a new plab. Now we configure it whether it will be used for promotions // and evacuations - or just evacuations. Thread* thread = Thread::current(); - ShenandoahThreadLocalData::reset_plab_promoted(thread); + ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + shenandoah_plab->reset_promoted(); // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries). // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread. 
@@ -209,12 +251,12 @@ ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAlloc log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu", actual_size, get_promoted_expended(), get_promoted_reserve()); expend_promoted(actual_size); - ShenandoahThreadLocalData::enable_plab_promotions(thread); - ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size); + shenandoah_plab->enable_promotions(); + shenandoah_plab->set_actual_size(actual_size); } else { // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations. - ShenandoahThreadLocalData::disable_plab_promotions(thread); - ShenandoahThreadLocalData::set_plab_actual_size(thread, 0); + shenandoah_plab->disable_promotions(); + shenandoah_plab->set_actual_size(0); log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? %s", actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions())); } @@ -276,6 +318,11 @@ void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* c ShenandoahHeap::heap()->heap_region_iterate(&old_regions_cl); } +void ShenandoahOldGeneration::heap_region_iterator(ShenandoahHeapRegionClosure* cl) { + ShenandoahIncludeRegionClosure old_regions_cl(cl); + ShenandoahHeap::heap()->heap_region_iterator(&old_regions_cl); +} + void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) { ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress); } @@ -284,6 +331,12 @@ bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() { return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(); } +void ShenandoahOldGeneration::record_tops_at_evac_start() { + for_each_region([](ShenandoahHeapRegion* region) { + region->record_top_at_evac_start(); + }); +} + void 
ShenandoahOldGeneration::cancel_marking() { if (is_concurrent_mark_in_progress()) { log_debug(gc)("Abandon SATB buffers"); @@ -297,7 +350,7 @@ void ShenandoahOldGeneration::cancel_gc() { shenandoah_assert_safepoint(); if (is_idle()) { #ifdef ASSERT - validate_waiting_for_bootstrap(); + validate_idle(); #endif } else { log_info(gc)("Terminating old gc cycle."); @@ -308,7 +361,7 @@ void ShenandoahOldGeneration::cancel_gc() { // Remove old generation access to young generation mark queues ShenandoahHeap::heap()->young_generation()->set_old_gen_task_queues(nullptr); // Transition to IDLE now. - transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + transition_to(ShenandoahOldGeneration::IDLE); } } @@ -422,6 +475,7 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent // At the end of old-gen, we may find that we have reclaimed immediate garbage, allowing a longer allocation runway. // We may also find that we have accumulated canddiate regions for mixed evacuation. If so, we will want to expand // the OldCollector reserve in order to make room for these mixed evacuations. + assert(ShenandoahHeap::heap()->mode()->is_generational(), "sanity"); assert(young_trash_regions == 0, "sanity"); ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); @@ -434,9 +488,8 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent const char* ShenandoahOldGeneration::state_name(State state) { switch (state) { - case WAITING_FOR_BOOTSTRAP: return "Waiting for Bootstrap"; + case IDLE: return "Idle"; case FILLING: return "Coalescing"; - case BOOTSTRAPPING: return "Bootstrapping"; case MARKING: return "Marking"; case EVACUATING: return "Evacuating"; case EVACUATING_AFTER_GLOBAL: return "Evacuating (G)"; @@ -474,7 +527,7 @@ void ShenandoahOldGeneration::transition_to(State new_state) { // the old generation in the respective states (EVACUATING or FILLING). 
After a Full GC, // the mark bitmaps are all reset, all regions are parsable and the mark context will // not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap -// and we expect the old generation to be waiting for bootstrap. +// and we expect the old generation to be idle. // // +-----------------+ // +------------> | FILLING | <---+ @@ -483,19 +536,12 @@ void ShenandoahOldGeneration::transition_to(State new_state) { // | | | | // | | | Filling Complete | <-> A global collection may // | | v | move the old generation -// | | +-----------------+ | directly from waiting for -// +-- |-- |--------> | WAITING | | bootstrap to filling or -// | | | +---- | FOR BOOTSTRAP | ----+ evacuating. It may also -// | | | | +-----------------+ move from filling to waiting -// | | | | | for bootstrap. -// | | | | | Reset Bitmap -// | | | | v -// | | | | +-----------------+ +----------------------+ -// | | | | | BOOTSTRAP | <-> | YOUNG GC | -// | | | | | | | (RSet Parses Region) | -// | | | | +-----------------+ +----------------------+ +// | | +-----------------+ | directly from idle to +// +-- |-- |--------> | IDLE | | filling or evacuating. +// | | | +---- | | ----+ It may also move from +// | | | | +-----------------+ filling to idle. 
// | | | | | -// | | | | | Old Marking +// | | | | | Reset Bitmap + Start Marking // | | | | v // | | | | +-----------------+ +----------------------+ // | | | | | MARKING | <-> | YOUNG GC | @@ -521,29 +567,23 @@ void ShenandoahOldGeneration::validate_transition(State new_state) { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); switch (new_state) { case FILLING: - assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping"); assert(is_mark_complete(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state)); assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill."); break; - case WAITING_FOR_BOOTSTRAP: + case IDLE: // GC cancellation can send us back here from any state. - validate_waiting_for_bootstrap(); - break; - case BOOTSTRAPPING: - assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state)); - assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates"); - assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable."); + validate_idle(); break; case MARKING: - assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state)); - assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues."); - assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now."); + assert(_state == IDLE, "Must be idle before marking, state is '%s'", state_name(_state)); + assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot start marking with mixed collection candidates"); + assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable."); break; case EVACUATING_AFTER_GLOBAL: assert(_state == 
EVACUATING, "Must have been evacuating, state is '%s'", state_name(_state)); break; case EVACUATING: - assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state)); + assert(_state == IDLE || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state)); assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here."); break; default: @@ -551,10 +591,10 @@ void ShenandoahOldGeneration::validate_transition(State new_state) { } } -bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() { +bool ShenandoahOldGeneration::validate_idle() { ShenandoahHeap* heap = ShenandoahHeap::heap(); - assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark."); - assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping."); + assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot be idle during old mark."); + assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot be idle when still setup for bootstrapping."); assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE"); assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE"); assert(!_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE"); @@ -581,13 +621,21 @@ void ShenandoahOldGeneration::handle_failed_evacuation() { } } -void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t size) { - _promotion_failure_count.add_then_fetch(1UL); - _promotion_failure_words.add_then_fetch(size); +void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t size) const { + LogTarget(Info, gc, plab) plab_info; + if (plab_info.is_enabled()) { + ShenandoahPLAB* plab = 
ShenandoahThreadLocalData::shenandoah_plab(thread); + if (plab != nullptr) { + plab->record_promotion_failure(size); + } else { + ResourceMark for_thread_name; + log_debug(gc, plab)("Thread: %s has no plab", thread->name()); + } + } - LogTarget(Debug, gc, plab) lt; - LogStream ls(lt); - if (lt.is_enabled()) { + LogTarget(Debug, gc, plab) plab_debug; + if (plab_debug.is_enabled()) { + LogStream ls(plab_debug); log_failed_promotion(ls, thread, size); } } @@ -602,9 +650,10 @@ void ShenandoahOldGeneration::log_failed_promotion(LogStream& ls, Thread* thread const size_t gc_id = heap->control_thread()->get_gc_id(); if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) { // Promotion failures should be very rare. Invest in providing useful diagnostic info. - PLAB* const plab = ShenandoahThreadLocalData::plab(thread); + ShenandoahPLAB* const shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread); + PLAB* const plab = (shenandoah_plab == nullptr)? nullptr: shenandoah_plab->plab(); const size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining(); - const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled"; + const char* promote_enabled = (shenandoah_plab != nullptr && shenandoah_plab->allows_promotion())? "enabled": "disabled"; // Promoted reserve is only changed by vm or control thread. Promoted expended is always accessed atomically. const size_t promotion_reserve = get_promoted_reserve(); @@ -624,15 +673,19 @@ void ShenandoahOldGeneration::log_failed_promotion(LogStream& ls, Thread* thread } } -void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words) const { - // Only register the copy of the object that won the evacuation race. - _card_scan->register_object_without_lock(obj); - - // Mark the entire range of the evacuated object as dirty. At next remembered set scan, - // we will clear dirty bits that do not hold interesting pointers. 
It's more efficient to - // do this in batch, in a background GC thread than to try to carefully dirty only cards - // that hold interesting pointers right now. - _card_scan->mark_range_as_dirty(obj, words); +void ShenandoahOldGeneration::update_card_table() { + for_each_region([this](ShenandoahHeapRegion* region) { + if (region->is_regular()) { + // Humongous regions are promoted in place, remembered set maintenance is handled there + // Regular regions that are promoted in place have their rset maintenance handled for + // the objects in the region when it was promoted. We record TEAS for such a region + // when the in-place-promotion is completed. Such a region may be used for additional + // promotions in the same cycle it was itself promoted. + if (region->top() > region->get_top_at_evac_start()) { + _card_scan->update_card_table(region->get_top_at_evac_start(), region->top()); + } + } + }); } bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() { @@ -647,7 +700,7 @@ void ShenandoahOldGeneration::abandon_collection_candidates() { _old_heuristics->abandon_collection_candidates(); } -void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() { +void ShenandoahOldGeneration::transition_old_generation_after_global_gc() { assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle."); _old_heuristics->prepare_for_old_collections(); log_info(gc, ergo)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: %zu", @@ -681,7 +734,7 @@ void ShenandoahOldGeneration::set_parsable(bool parsable) { // that we would unload classes and make everything parsable. But, we know // that now so we can override this state. abandon_collection_candidates(); - transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + transition_to(ShenandoahOldGeneration::IDLE); break; default: // We can get here during a full GC. 
The full GC will cancel anything @@ -698,7 +751,7 @@ void ShenandoahOldGeneration::complete_mixed_evacuations() { assert(is_doing_mixed_evacuations(), "Mixed evacuations should be in progress"); if (!_old_heuristics->has_coalesce_and_fill_candidates()) { // No candidate regions to coalesce and fill - transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + transition_to(ShenandoahOldGeneration::IDLE); return; } @@ -712,7 +765,7 @@ void ShenandoahOldGeneration::complete_mixed_evacuations() { // more to do. assert(state() == ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL, "Should be evacuating after a global cycle"); abandon_collection_candidates(); - transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + transition_to(ShenandoahOldGeneration::IDLE); } void ShenandoahOldGeneration::abandon_mixed_evacuations() { @@ -722,7 +775,7 @@ void ShenandoahOldGeneration::abandon_mixed_evacuations() { break; case ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL: abandon_collection_candidates(); - transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + transition_to(ShenandoahOldGeneration::IDLE); break; default: log_warning(gc)("Abandon mixed evacuations in unexpected state: %s", state_name(state())); @@ -765,6 +818,7 @@ size_t ShenandoahOldGeneration::used_regions_size() const { return used_regions * ShenandoahHeapRegion::region_size_bytes(); } +// For the old generation, max_capacity() equals soft_max_capacity() size_t ShenandoahOldGeneration::max_capacity() const { size_t total_regions = _free_set->total_old_regions(); return total_regions * ShenandoahHeapRegion::region_size_bytes(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp index 5ebad461f3c..942f93c5c68 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -76,11 +76,6 @@ private: // objects). 
This field records the total amount of padding used for such regions. size_t _pad_for_promote_in_place; - // Keep track of the number and size of promotions that failed. Perhaps we should use this to increase - // the size of the old generation for the next collection cycle. - Atomic _promotion_failure_count; - Atomic _promotion_failure_words; - // During construction of the collection set, we keep track of regions that are eligible // for promotion in place. These fields track the count of those humongous and regular regions. // This data is used to force the evacuation phase even when the collection set is otherwise @@ -125,9 +120,8 @@ public: // This is used on the allocation path to gate promotions that would exceed the reserve size_t get_promoted_expended() const; - // Return the count and size (in words) of failed promotions since the last reset - size_t get_promotion_failed_count() const { return _promotion_failure_count.load_relaxed(); } - size_t get_promotion_failed_words() const { return _promotion_failure_words.load_relaxed(); } + // Aggregate and log promotion failure stats if logging is enabled + void maybe_log_promotion_failure_stats(bool concurrent) const; // Test if there is enough memory reserved for this promotion bool can_promote(size_t requested_bytes) const { @@ -175,11 +169,11 @@ public: void handle_failed_evacuation(); // Increment promotion failure counters, optionally log a more detailed message - void handle_failed_promotion(Thread* thread, size_t size); + void handle_failed_promotion(Thread* thread, size_t size) const; void log_failed_promotion(LogStream& ls, Thread* thread, size_t size) const; - // A successful evacuation re-dirties the cards and registers the object with the remembered set - void handle_evacuation(HeapWord* obj, size_t words) const; + // Iterate over recently promoted objects to update card table and object registrations + void update_card_table(); // Clear the flag after it is consumed by the control thread bool 
clear_failed_evacuation() { @@ -205,11 +199,36 @@ public: // Mark card for this location as dirty void mark_card_as_dirty(void* location); + template + class ShenandoahHeapRegionLambda : public ShenandoahHeapRegionClosure { + T _region_lambda; + public: + explicit ShenandoahHeapRegionLambda(T region_lambda) : _region_lambda(region_lambda) {} + + void heap_region_do(ShenandoahHeapRegion* r) override { + _region_lambda(r); + } + + bool is_thread_safe() override { + return true; + } + + size_t parallel_region_stride() override { + // Temporarily override to force parallelism when updating card table + return 8; + } + }; + + template + void for_each_region(LambdaT lambda) { + ShenandoahHeapRegionLambda l(lambda); + heap_region_iterator(&l); + } + void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; - void parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) override; - void heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + void heap_region_iterator(ShenandoahHeapRegionClosure* cl); bool contains(ShenandoahAffiliation affiliation) const override; bool contains(ShenandoahHeapRegion* region) const override; @@ -218,8 +237,17 @@ public: void set_concurrent_mark_in_progress(bool in_progress) override; bool is_concurrent_mark_in_progress() override; + // For old regions, objects between top at evac start and top represent promoted objects. + // These objects will need to have their cards dirtied and their offsets within the cards registered. + void record_tops_at_evac_start(); + bool entry_coalesce_and_fill(); - void prepare_for_mixed_collections_after_global_gc(); + + // Global collections touch old regions, so the old generation needs to be informed of this. + // The old generation may decide to schedule additional mixed collections, or may decide to + // immediately coalesce-and-fill old objects in regions that were not collected. 
+ void transition_old_generation_after_global_gc(); + void prepare_gc() override; void prepare_regions_and_collection_set(bool concurrent) override; void record_success_concurrent(bool abbreviated) override; @@ -262,11 +290,7 @@ public: } bool is_idle() const { - return state() == WAITING_FOR_BOOTSTRAP; - } - - bool is_bootstrapping() const { - return state() == BOOTSTRAPPING; + return state() == IDLE; } // Amount of live memory (bytes) in regions waiting for mixed collections @@ -277,11 +301,11 @@ public: public: enum State { - FILLING, WAITING_FOR_BOOTSTRAP, BOOTSTRAPPING, MARKING, EVACUATING, EVACUATING_AFTER_GLOBAL + FILLING, IDLE, MARKING, EVACUATING, EVACUATING_AFTER_GLOBAL }; #ifdef ASSERT - bool validate_waiting_for_bootstrap(); + bool validate_idle(); #endif private: @@ -324,7 +348,7 @@ public: size_t usage_trigger_threshold() const; bool can_start_gc() { - return _state == WAITING_FOR_BOOTSTRAP; + return _state == IDLE; } static const char* state_name(State state); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPLAB.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPLAB.cpp new file mode 100644 index 00000000000..5049113b665 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahPLAB.cpp @@ -0,0 +1,213 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "gc/shared/cardTable.hpp" +#include "gc/shenandoah/shenandoahAllocRequest.hpp" +#include "gc/shenandoah/shenandoahGenerationalHeap.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahPLAB.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "runtime/javaThread.hpp" +#include "utilities/copy.hpp" + +ShenandoahPLAB::ShenandoahPLAB() : + _plab(nullptr), + _desired_size(0), + _actual_size(0), + _promoted(0), + _promotion_failure_count(0), + _promotion_failure_words(0), + _allows_promotion(false), + _retries_enabled(false), + _heap(ShenandoahGenerationalHeap::heap()) { + _plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words())); +} + +ShenandoahPLAB::~ShenandoahPLAB() { + if (_plab != nullptr) { + delete _plab; + } +} + +void ShenandoahPLAB::subtract_from_promoted(size_t increment) { + assert(_promoted >= increment, "Cannot subtract more than remaining promoted"); + _promoted -= increment; +} + +HeapWord* ShenandoahPLAB::allocate(size_t size, bool is_promotion) { + assert(UseTLAB, "TLABs should be enabled"); + + if (_plab == nullptr) { + // No PLABs in this thread, fallback to shared allocation + return nullptr; + } + + if (is_promotion && !_allows_promotion) { + // Thread is not allowed to promote + return nullptr; + } + + HeapWord* obj = _plab->allocate(size); + if (obj == nullptr) { + if (_plab->words_remaining() < _heap->plab_min_size()) { + // allocate_slow will establish _allows_promotion for future invocations + obj = 
allocate_slow(size, is_promotion); + } + } + + // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation + if (obj == nullptr) { + return nullptr; + } + + if (is_promotion) { + add_to_promoted(size * HeapWordSize); + } + return obj; +} + +// Establish a new PLAB and allocate size HeapWords within it. +HeapWord* ShenandoahPLAB::allocate_slow(size_t size, bool is_promotion) { + assert(_heap->mode()->is_generational(), "PLABs only relevant to generational GC"); + + // PLABs are aligned to card boundaries to avoid synchronization with concurrent + // allocations in other PLABs. + const size_t plab_min_size = _heap->plab_min_size(); + const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size; + + // Figure out size of new PLAB, using value determined at last refill. + size_t cur_size = _desired_size; + if (cur_size == 0) { + cur_size = plab_min_size; + } + + // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size() + // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor + // are card multiples.) + const size_t future_size = MIN2(cur_size * 2, _heap->plab_max_size()); + assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu" + ", card_size: %u, cur_size: %zu, max: %zu", + future_size, CardTable::card_size_in_words(), cur_size, _heap->plab_max_size()); + + // Record new heuristic value even if we take any shortcut. This captures + // the case when moderately-sized objects always take a shortcut. At some point, + // heuristics should catch up with them. Note that the requested cur_size may + // not be honored, but we remember that this is the preferred size. 
+ log_debug(gc, plab)("Set next PLAB refill size: %zu bytes", future_size * HeapWordSize); + set_desired_size(future_size); + + if (cur_size < size) { + // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation. + // This avoids retiring perfectly good PLABs in order to represent a single large object allocation. + log_debug(gc, plab)("Current PLAB size (%zu) is too small for %zu", cur_size * HeapWordSize, size * HeapWordSize); + return nullptr; + } + + if (_plab->words_remaining() < plab_min_size) { + // Retire current PLAB. This takes care of any PLAB book-keeping. + // retire_plab() registers the remnant filler object with the remembered set scanner without a lock. + // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere. + retire(); + + size_t actual_size = 0; + HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size); + if (plab_buf == nullptr) { + if (min_size == plab_min_size) { + // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us + // to fail faster on subsequent promotion attempts. + disable_promotions(); + } + return nullptr; + } + + enable_retries(); + + // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail. + if (ZeroTLAB) { + // Skip mangling the space corresponding to the object header to + // ensure that the returned space is not considered parsable by + // any concurrent GC thread. 
+ Copy::zero_to_words(plab_buf, actual_size); + } else { +#ifdef ASSERT + size_t hdr_size = oopDesc::header_size(); + Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal); +#endif + } + assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design"); + _plab->set_buf(plab_buf, actual_size); + if (is_promotion && !_allows_promotion) { + return nullptr; + } + return _plab->allocate(size); + } + + // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble + // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request + // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we + // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs. + return nullptr; +} + +HeapWord* ShenandoahPLAB::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) { + assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design"); + assert(word_size >= min_size, "Requested PLAB is too small"); + + ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size); + HeapWord* res = _heap->allocate_memory(req); + if (res != nullptr) { + *actual_size = req.actual_size(); + } else { + *actual_size = 0; + } + assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design"); + return res; +} + +void ShenandoahPLAB::retire() { + // We don't enforce limits on plab evacuations. We let it consume all available old-gen memory in order to reduce + // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion + // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any + // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle. 
+ + // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to + // promotions. Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions. + // 1. Some of the plab may have been dedicated to evacuations. + // 2. Some of the plab may have been abandoned due to waste (at the end of the plab). + size_t not_promoted = _actual_size - _promoted; + reset_promoted(); + set_actual_size(0); + if (not_promoted > 0) { + log_debug(gc, plab)("Retire PLAB, unexpend unpromoted: %zu", not_promoted * HeapWordSize); + _heap->old_generation()->unexpend_promoted(not_promoted); + } + const size_t original_waste = _plab->waste(); + HeapWord* const top = _plab->top(); + + // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable. + // It adds the size of this unused memory, in words, to plab->waste(). + _plab->retire(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPLAB.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPLAB.hpp new file mode 100644 index 00000000000..4278eb65b10 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahPLAB.hpp @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPLAB_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHPLAB_HPP + +#include "gc/shared/plab.hpp" +#include "memory/allocation.hpp" + +class ShenandoahGenerationalHeap; + +class ShenandoahPLAB : public CHeapObj { +private: + // The actual allocation buffer + PLAB* _plab; + + // Heuristics will grow the desired size of plabs. + size_t _desired_size; + + // Once the plab has been allocated, and we know the actual size, we record it here. + size_t _actual_size; + + // As the plab is used for promotions, this value is incremented. When the plab is + // retired, the difference between 'actual_size' and 'promoted' will be returned to + // the old generation's promotion reserve (i.e., it will be 'unexpended'). + size_t _promoted; + + // Track failed promotion attempts per thread + size_t _promotion_failure_count; + size_t _promotion_failure_words; + + // If false, no more promotion by this thread during this evacuation phase. + bool _allows_promotion; + + // If true, evacuations may attempt to allocate a smaller plab if the original size fails. 
+ bool _retries_enabled; + + // Use for allocations, min/max plab sizes + ShenandoahGenerationalHeap* _heap; + + // Enable retry logic for PLAB allocation failures + void enable_retries() { _retries_enabled = true; } + + // Establish a new PLAB and allocate from it + HeapWord* allocate_slow(size_t size, bool is_promotion); + // Allocate a new PLAB buffer from the heap + HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size); + +public: + ShenandoahPLAB(); + ~ShenandoahPLAB(); + + // Access the underlying PLAB buffer + PLAB* plab() const { return _plab; } + + // Heuristic size for next PLAB allocation + size_t desired_size() const { return _desired_size; } + // Update heuristic size for next PLAB allocation + void set_desired_size(size_t v) { _desired_size = v; } + + // Check if retry logic is enabled + bool retries_enabled() const { return _retries_enabled; } + // Disable retry logic for PLAB allocation failures + void disable_retries() { _retries_enabled = false; } + + // Allow this thread to promote objects + void enable_promotions() { _allows_promotion = true; } + // Prevent this thread from promoting objects + void disable_promotions() { _allows_promotion = false; } + // Check if this thread is allowed to promote objects + bool allows_promotion() const { return _allows_promotion; } + + // Reset promotion tracking for new evacuation phase + void reset_promoted() { _promoted = 0; } + // When a plab is retired, subtract from the expended promotion budget + void subtract_from_promoted(size_t increment); + // Bytes promoted through this PLAB + size_t get_promoted() const { return _promoted; } + // Track promoted bytes in this PLAB + void add_to_promoted(size_t increment) { _promoted += increment; } + + // Track failed promotion attempts + void record_promotion_failure(size_t size) { + _promotion_failure_count++; + _promotion_failure_words += size; + } + // Get failed promotion count for aggregation + size_t get_promotion_failure_count() 
const { return _promotion_failure_count; } + // Get failed promotion words for aggregation + size_t get_promotion_failure_words() const { return _promotion_failure_words; } + // Reset failure tracking for new evacuation phase + void reset_promotion_failures() { + _promotion_failure_count = 0; + _promotion_failure_words = 0; + } + + // Record actual allocated PLAB size + void set_actual_size(size_t value) { _actual_size = value; } + // Actual allocated PLAB size + size_t get_actual_size() const { return _actual_size; } + + // Allocate from this PLAB + HeapWord* allocate(size_t size, bool is_promotion); + + // Retire this PLAB and return unused promotion budget + void retire(); +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHPLAB_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp index e890008b916..6a316e2265a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp @@ -109,10 +109,11 @@ class outputStream; f(conc_strong_roots, "Concurrent Strong Roots") \ SHENANDOAH_PAR_PHASE_DO(conc_strong_roots_, " CSR: ", f) \ f(conc_evac, "Concurrent Evacuation") \ + f(conc_update_card_table, "Concurrent Update Cards") \ f(conc_final_roots, "Concurrent Final Roots") \ f(promote_in_place, " Promote Regions") \ - f(final_roots_gross, "Pause Verify Final Roots (G)") \ - f(final_roots, "Pause Verify Final Roots (N)") \ + f(final_verify_gross, "Pause Final Verify (G)") \ + f(final_verify, "Pause Final Verify (N)") \ \ f(init_update_refs_gross, "Pause Init Update Refs (G)") \ f(init_update_refs, "Pause Init Update Refs (N)") \ @@ -254,7 +255,7 @@ public: void flush_cycle_to_global(); static const char* phase_name(Phase phase) { - assert(phase >= 0 && phase < _num_phases, "Out of bound"); + assert(phase >= 0 && phase < _num_phases, "Out of bounds: %d", phase); return _phase_names[phase]; } diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp index ec4b7c7217c..28094a1d57d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp @@ -36,13 +36,14 @@ ShenandoahRegulatorThread::ShenandoahRegulatorThread(ShenandoahGenerationalContr _heap(ShenandoahHeap::heap()), _control_thread(control_thread), _sleep(ShenandoahControlIntervalMin), - _last_sleep_adjust_time(os::elapsedTime()) { + _most_recent_wake_time(os::elapsedTime()), + _last_sleep_adjust_time(_most_recent_wake_time) { shenandoah_assert_generational(); _old_heuristics = _heap->old_generation()->heuristics(); _young_heuristics = _heap->young_generation()->heuristics(); _global_heuristics = _heap->global_generation()->heuristics(); - set_name("Shenandoah Regulator Thread"); + set_name("ShenRegulator"); create_and_start(); } @@ -115,19 +116,22 @@ void ShenandoahRegulatorThread::regulator_sleep() { // Wait before performing the next action. If allocation happened during this wait, // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle, // back off exponentially. 
- double current = os::elapsedTime(); - + double before_sleep_time = _most_recent_wake_time; if (ShenandoahHeap::heap()->has_changed()) { _sleep = ShenandoahControlIntervalMin; - } else if ((current - _last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ + } else if ((before_sleep_time - _last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ _sleep = MIN2(ShenandoahControlIntervalMax, MAX2(1u, _sleep * 2)); - _last_sleep_adjust_time = current; + _last_sleep_adjust_time = before_sleep_time; } SuspendibleThreadSetLeaver leaver; os::naked_short_sleep(_sleep); + double wake_time = os::elapsedTime(); + _most_recent_period = wake_time - _most_recent_wake_time; + _most_recent_wake_time = wake_time; + _young_heuristics->update_should_start_query_times(_most_recent_wake_time, double(_sleep) / 1000.0); if (LogTarget(Debug, gc, thread)::is_enabled()) { - double elapsed = os::elapsedTime() - current; + double elapsed = _most_recent_wake_time - before_sleep_time; double hiccup = elapsed - double(_sleep); if (hiccup > 0.001) { log_debug(gc, thread)("Regulator hiccup time: %.3fs", hiccup); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp index 2519025b6fb..cc41bc2c65b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp @@ -79,7 +79,10 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread { ShenandoahOldHeuristics* _old_heuristics; ShenandoahHeuristics* _global_heuristics; + // duration of planned regulator sleep period, in ms uint _sleep; + double _most_recent_wake_time; + double _most_recent_period; double _last_sleep_adjust_time; }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp index 9e6b1960708..80825ac43ad 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp @@ -45,7 +45,7 @@ ShenandoahJavaThreadsIterator::ShenandoahJavaThreadsIterator(ShenandoahPhaseTimi } uint ShenandoahJavaThreadsIterator::claim() { - return AtomicAccess::fetch_then_add(&_claimed, _stride, memory_order_relaxed); + return _claimed.fetch_then_add(_stride, memory_order_relaxed); } void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id) { @@ -200,9 +200,6 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) { NMethodToOopClosure code_blob_cl(oops, NMethodToOopClosure::FixRelocations); ShenandoahNMethodAndDisarmClosure nmethods_and_disarm_Cl(oops); - NMethodToOopClosure* adjust_code_closure = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ? - static_cast(&nmethods_and_disarm_Cl) : - static_cast(&code_blob_cl); CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong); // Process light-weight/limited parallel roots then @@ -211,7 +208,7 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) { _cld_roots.cld_do(&adjust_cld_closure, worker_id); // Process heavy-weight/fully parallel roots the last - _code_roots.nmethods_do(adjust_code_closure, worker_id); + _code_roots.nmethods_do(&nmethods_and_disarm_Cl, worker_id); _thread_roots.oops_do(oops, nullptr, worker_id); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp index 29d8c9fac2d..55367e706a2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp @@ -33,6 +33,7 @@ #include "gc/shenandoah/shenandoahSharedVariables.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "memory/iterator.hpp" +#include "runtime/atomic.hpp" #include 
"runtime/threads.hpp" template @@ -73,7 +74,7 @@ private: ThreadsListHandle _threads; uint const _length; uint const _stride; - volatile uint _claimed; + Atomic _claimed; ShenandoahPhaseTimings::Phase _phase; uint claim(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp index 6aebec28163..4504ac96819 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp @@ -172,10 +172,6 @@ template void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) { NMethodToOopClosure update_nmethods(keep_alive, NMethodToOopClosure::FixRelocations); ShenandoahNMethodAndDisarmClosure nmethods_and_disarm_Cl(keep_alive); - NMethodToOopClosure* codes_cl = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ? - static_cast(&nmethods_and_disarm_Cl) : - static_cast(&update_nmethods); - CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong); // Process light-weight/limited parallel roots then @@ -184,7 +180,7 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv _cld_roots.cld_do(&clds, worker_id); // Process heavy-weight/fully parallel roots the last - _code_roots.nmethods_do(codes_cl, worker_id); + _code_roots.nmethods_do(&nmethods_and_disarm_Cl, worker_id); _thread_roots.oops_do(keep_alive, nullptr, worker_id); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp index 117984a6d41..5b4ce6d0bc9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp @@ -74,7 +74,7 @@ void ShenandoahSTWMark::mark() { // Arm all nmethods. Even though this is STW mark, some marking code // piggybacks on nmethod barriers for special instances. 
- ShenandoahCodeRoots::arm_nmethods_for_mark(); + ShenandoahCodeRoots::arm_nmethods(); // Weak reference processing ShenandoahReferenceProcessor* rp = _generation->ref_processor(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp index 05af25f13ad..8d7ba2dc46f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp @@ -31,6 +31,33 @@ #include "logging/log.hpp" #include "runtime/threads.hpp" +// A closure that takes an oop in the old generation and, if it's pointing +// into the young generation, dirties the corresponding remembered set entry. +class ShenandoahDirtyRememberedSetClosure : public BasicOopIterateClosure { +protected: + ShenandoahGenerationalHeap* const _heap; + ShenandoahScanRemembered* const _scanner; + +public: + ShenandoahDirtyRememberedSetClosure() : + _heap(ShenandoahGenerationalHeap::heap()), + _scanner(_heap->old_generation()->card_scan()) {} + + template + void work(T* p) { + assert(_heap->is_in_old(p), "Expecting to get an old gen address"); + if (T o = RawAccess<>::oop_load(p); !CompressedOops::is_null(o)) { + if (const oop obj = CompressedOops::decode_not_null(o); _heap->is_in_young(obj)) { + // Dirty the card containing the cross-generational pointer. 
+ _scanner->mark_card_as_dirty((HeapWord*) p); + } + } + } + + void do_oop(narrowOop* p) override { work(p); } + void do_oop(oop* p) override { work(p); } +}; + size_t ShenandoahDirectCardMarkRememberedSet::last_valid_index() const { return _card_table->last_valid_index(); } @@ -161,7 +188,6 @@ void ShenandoahCardCluster::register_object_without_lock(HeapWord* address) { uint8_t offset_in_card = checked_cast(pointer_delta(address, card_start_address)); if (!starts_object(card_at_start)) { - set_starts_object_bit(card_at_start); set_first_start(card_at_start, offset_in_card); set_last_start(card_at_start, offset_in_card); } else { @@ -172,6 +198,49 @@ void ShenandoahCardCluster::register_object_without_lock(HeapWord* address) { } } +void ShenandoahCardCluster::update_card_table(HeapWord* start, HeapWord* end) { + HeapWord* address = start; + HeapWord* previous_address = nullptr; + uint8_t previous_offset = 0; + size_t previous_card_index = -1; + ShenandoahDirtyRememberedSetClosure make_cards_dirty; + + log_debug(gc, remset)("Update remembered set from " PTR_FORMAT ", to " PTR_FORMAT, p2i(start), p2i(end)); + _rs->mark_range_as_dirty(start, pointer_delta(end, start)); + + while (address < end) { + + // Compute card and offset in card for this object + const size_t object_card_index = _rs->card_index_for_addr(address); + const HeapWord* card_start_address = _rs->addr_for_card_index(object_card_index); + const uint8_t offset_in_card = checked_cast(pointer_delta(address, card_start_address)); + + if (object_card_index != previous_card_index) { + if (previous_address != nullptr) { + // Register the previous object on the previous card, we are starting a new card here + set_last_start(previous_card_index, previous_offset); + } + + previous_card_index = object_card_index; + if (!starts_object(object_card_index)) { + // The previous cycle may have recorded an earlier start in this card. Do not overwrite it. 
+ set_first_start(object_card_index, offset_in_card); + } + } + + previous_offset = offset_in_card; + previous_address = address; + + const oop obj = cast_to_oop(address); + address += obj->size(); + } + + // Register the last object seen in this range. + if (previous_address != nullptr) { + set_last_start(previous_card_index, previous_offset); + } +} + void ShenandoahCardCluster::coalesce_objects(HeapWord* address, size_t length_in_words) { size_t card_at_start = _rs->card_index_for_addr(address); @@ -641,36 +710,6 @@ void ShenandoahScanRemembered::merge_worker_card_stats_cumulative( } #endif -// A closure that takes an oop in the old generation and, if it's pointing -// into the young generation, dirties the corresponding remembered set entry. -// This is only used to rebuild the remembered set after a full GC. -class ShenandoahDirtyRememberedSetClosure : public BasicOopIterateClosure { -protected: - ShenandoahGenerationalHeap* const _heap; - ShenandoahScanRemembered* const _scanner; - -public: - ShenandoahDirtyRememberedSetClosure() : - _heap(ShenandoahGenerationalHeap::heap()), - _scanner(_heap->old_generation()->card_scan()) {} - - template - inline void work(T* p) { - assert(_heap->is_in_old(p), "Expecting to get an old gen address"); - T o = RawAccess<>::oop_load(p); - if (!CompressedOops::is_null(o)) { - oop obj = CompressedOops::decode_not_null(o); - if (_heap->is_in_young(obj)) { - // Dirty the card containing the cross-generational pointer. 
- _scanner->mark_card_as_dirty((HeapWord*) p); - } - } - } - - virtual void do_oop(narrowOop* p) { work(p); } - virtual void do_oop(oop* p) { work(p); } -}; - ShenandoahDirectCardMarkRememberedSet::ShenandoahDirectCardMarkRememberedSet(ShenandoahCardTable* card_table, size_t total_card_count) : LogCardValsPerIntPtr(log2i_exact(sizeof(intptr_t)) - log2i_exact(sizeof(CardValue))), LogCardSizeInWords(log2i_exact(CardTable::card_size_in_words())) { @@ -1024,7 +1063,7 @@ ShenandoahRegionChunkIterator::ShenandoahRegionChunkIterator(ShenandoahHeap* hea } void ShenandoahRegionChunkIterator::reset() { - _index = 0; + _index.store_relaxed(0); } ShenandoahReconstructRememberedSetTask::ShenandoahReconstructRememberedSetTask(ShenandoahRegionIterator* regions) @@ -1039,38 +1078,44 @@ void ShenandoahReconstructRememberedSetTask::work(uint worker_id) { ShenandoahDirtyRememberedSetClosure dirty_cards_for_cross_generational_pointers; while (r != nullptr) { - if (r->is_old() && r->is_active()) { - HeapWord* obj_addr = r->bottom(); - if (r->is_humongous_start()) { - // First, clear the remembered set - oop obj = cast_to_oop(obj_addr); - size_t size = obj->size(); - - size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); - size_t region_index = r->index(); - ShenandoahHeapRegion* humongous_region = heap->get_region(region_index); - while (num_regions-- != 0) { - scanner->reset_object_range(humongous_region->bottom(), humongous_region->end()); - region_index++; - humongous_region = heap->get_region(region_index); - } - - // Then register the humongous object and DIRTY relevant remembered set cards - scanner->register_object_without_lock(obj_addr); - obj->oop_iterate(&dirty_cards_for_cross_generational_pointers); - } else if (!r->is_humongous()) { - scanner->reset_object_range(r->bottom(), r->end()); - - // Then iterate over all objects, registering object and DIRTYing relevant remembered set cards - HeapWord* t = r->top(); - while (obj_addr < t) { + if 
(r->is_active()) { + if (r->is_old()) { + HeapWord* obj_addr = r->bottom(); + if (r->is_humongous_start()) { + // First, clear the remembered set oop obj = cast_to_oop(obj_addr); + size_t size = obj->size(); + + size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); + size_t region_index = r->index(); + ShenandoahHeapRegion* humongous_region = heap->get_region(region_index); + while (num_regions-- != 0) { + scanner->reset_object_range(humongous_region->bottom(), humongous_region->end()); + region_index++; + humongous_region = heap->get_region(region_index); + } + + // Then register the humongous object and DIRTY relevant remembered set cards scanner->register_object_without_lock(obj_addr); - obj_addr += obj->oop_iterate_size(&dirty_cards_for_cross_generational_pointers); - } - } // else, ignore humongous continuation region + obj->oop_iterate(&dirty_cards_for_cross_generational_pointers); + } else if (!r->is_humongous()) { + scanner->reset_object_range(r->bottom(), r->end()); + + // Then iterate over all objects, registering object and DIRTYing relevant remembered set cards + HeapWord* t = r->top(); + while (obj_addr < t) { + oop obj = cast_to_oop(obj_addr); + scanner->register_object_without_lock(obj_addr); + obj_addr += obj->oop_iterate_size(&dirty_cards_for_cross_generational_pointers); + } + } // else, ignore humongous continuation region + } else { + // The region is young, but it may become old again and we don't want stale remembered set data. + assert(r->is_young(), "Region: %zu, is active but free", r->index()); + heap->old_generation()->clear_cards_for(r); + } } - // else, this region is FREE or YOUNG or inactive and we can ignore it. + // else, this region is FREE or inactive and we can ignore it. 
r = _regions->next(); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp index c758873a040..53f00e64a03 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp @@ -603,6 +603,9 @@ public: // as address. void register_object_without_lock(HeapWord* address); + // Dirty cards and register objects for the given range in memory. + void update_card_table(HeapWord* start, HeapWord* end); + // During the reference updates phase of GC, we walk through each old-gen memory region that was // not part of the collection set and we invalidate all unmarked objects. As part of this effort, // we coalesce neighboring dead objects in order to make future remembered set scanning more @@ -814,6 +817,10 @@ public: } } + void update_card_table(HeapWord* start, HeapWord* end) const { + _scc->update_card_table(start, end); + } + // Return true iff this object is "properly" registered. 
bool verify_registration(HeapWord* address, ShenandoahMarkingContext* ctx); @@ -973,7 +980,7 @@ private: const size_t _total_chunks; shenandoah_padding(0); - volatile size_t _index; + Atomic _index; shenandoah_padding(1); size_t _region_index[_maximum_groups]; // The region index for the first region spanned by this group diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp index e394daa68c0..3c82efee16c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp @@ -380,14 +380,14 @@ ShenandoahScanRemembered::process_region_slice(ShenandoahHeapRegion *region, siz } inline bool ShenandoahRegionChunkIterator::has_next() const { - return _index < _total_chunks; + return _index.load_relaxed() < _total_chunks; } inline bool ShenandoahRegionChunkIterator::next(struct ShenandoahRegionChunk *assignment) { - if (_index >= _total_chunks) { + if (_index.load_relaxed() >= _total_chunks) { return false; } - size_t new_index = AtomicAccess::add(&_index, (size_t) 1, memory_order_relaxed); + size_t new_index = _index.add_then_fetch((size_t) 1, memory_order_relaxed); if (new_index > _total_chunks) { // First worker that hits new_index == _total_chunks continues, other // contending workers return false. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp index 969edafbf75..dbae1b35c6f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2024, Red Hat, Inc. All rights reserved. - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "gc/shared/taskTerminator.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" #include "nmt/memTag.hpp" +#include "runtime/atomic.hpp" #include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutex.hpp" @@ -306,7 +307,7 @@ template class ParallelClaimableQueueSet: public GenericTaskQueueSet { private: shenandoah_padding(0); - volatile jint _claimed_index; + Atomic _claimed_index; shenandoah_padding(1); DEBUG_ONLY(uint _reserved; ) @@ -319,13 +320,13 @@ public: DEBUG_ONLY(_reserved = 0; ) } - void clear_claimed() { _claimed_index = 0; } + void clear_claimed() { _claimed_index.store_relaxed(0); } T* claim_next(); // reserve queues that not for parallel claiming void reserve(uint n) { assert(n <= size(), "Sanity"); - _claimed_index = (jint)n; + _claimed_index.store_relaxed((jint)n); DEBUG_ONLY(_reserved = n;) } @@ -336,11 +337,11 @@ template T* ParallelClaimableQueueSet::claim_next() { jint size = (jint)GenericTaskQueueSet::size(); - if (_claimed_index >= size) { + if (_claimed_index.load_relaxed() >= size) { return nullptr; } - jint index = AtomicAccess::add(&_claimed_index, 1, memory_order_relaxed); + jint index = _claimed_index.add_then_fetch(1, memory_order_relaxed); if (index <= size) { return GenericTaskQueueSet::queue((uint)index - 1); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp index ace5ab5e69a..1f3ce76cc1c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp @@ -37,12 +37,7 @@ ShenandoahThreadLocalData::ShenandoahThreadLocalData() : _card_table(nullptr), _gclab(nullptr), _gclab_size(0), - _plab(nullptr), - _plab_desired_size(0), - _plab_actual_size(0), - _plab_promoted(0), - _plab_allows_promotion(true), - _plab_retries_enabled(true), + 
_shenandoah_plab(nullptr), _evacuation_stats(new ShenandoahEvacuationStats()) { } @@ -50,9 +45,9 @@ ShenandoahThreadLocalData::~ShenandoahThreadLocalData() { if (_gclab != nullptr) { delete _gclab; } - if (_plab != nullptr) { - ShenandoahGenerationalHeap::heap()->retire_plab(_plab); - delete _plab; + if (_shenandoah_plab != nullptr) { + _shenandoah_plab->retire(); + delete _shenandoah_plab; } delete _evacuation_stats; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp index f54a65b0785..b1b923bbfce 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp @@ -36,6 +36,7 @@ #include "gc/shenandoah/shenandoahCodeRoots.hpp" #include "gc/shenandoah/shenandoahEvacTracker.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" +#include "gc/shenandoah/shenandoahPLAB.hpp" #include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp" #include "runtime/javaThread.hpp" #include "utilities/debug.hpp" @@ -62,24 +63,7 @@ private: // Used both by mutator threads and by GC worker threads // for evacuations within the old generation and // for promotions from the young generation into the old generation. - PLAB* _plab; - - // Heuristics will grow the desired size of plabs. - size_t _plab_desired_size; - - // Once the plab has been allocated, and we know the actual size, we record it here. - size_t _plab_actual_size; - - // As the plab is used for promotions, this value is incremented. When the plab is - // retired, the difference between 'actual_size' and 'promoted' will be returned to - // the old generation's promotion reserve (i.e., it will be 'unexpended'). - size_t _plab_promoted; - - // If false, no more promotion by this thread during this evacuation phase. - bool _plab_allows_promotion; - - // If true, evacuations may attempt to allocate a smaller plab if the original size fails. 
- bool _plab_retries_enabled; + ShenandoahPLAB* _shenandoah_plab; ShenandoahEvacuationStats* _evacuation_stats; @@ -141,8 +125,7 @@ public: data(thread)->_gclab_size = 0; if (ShenandoahHeap::heap()->mode()->is_generational()) { - data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words())); - data(thread)->_plab_desired_size = 0; + data(thread)->_shenandoah_plab = new ShenandoahPLAB(); } } @@ -170,65 +153,8 @@ public: return data(thread)->_evacuation_stats; } - static PLAB* plab(Thread* thread) { - return data(thread)->_plab; - } - - static size_t plab_size(Thread* thread) { - return data(thread)->_plab_desired_size; - } - - static void set_plab_size(Thread* thread, size_t v) { - data(thread)->_plab_desired_size = v; - } - - static void enable_plab_retries(Thread* thread) { - data(thread)->_plab_retries_enabled = true; - } - - static void disable_plab_retries(Thread* thread) { - data(thread)->_plab_retries_enabled = false; - } - - static bool plab_retries_enabled(Thread* thread) { - return data(thread)->_plab_retries_enabled; - } - - static void enable_plab_promotions(Thread* thread) { - data(thread)->_plab_allows_promotion = true; - } - - static void disable_plab_promotions(Thread* thread) { - data(thread)->_plab_allows_promotion = false; - } - - static bool allow_plab_promotions(Thread* thread) { - return data(thread)->_plab_allows_promotion; - } - - static void reset_plab_promoted(Thread* thread) { - data(thread)->_plab_promoted = 0; - } - - static void add_to_plab_promoted(Thread* thread, size_t increment) { - data(thread)->_plab_promoted += increment; - } - - static void subtract_from_plab_promoted(Thread* thread, size_t increment) { - assert(data(thread)->_plab_promoted >= increment, "Cannot subtract more than remaining promoted"); - data(thread)->_plab_promoted -= increment; - } - - static size_t get_plab_promoted(Thread* thread) { - return data(thread)->_plab_promoted; - } - - static void set_plab_actual_size(Thread* thread, 
size_t value) { - data(thread)->_plab_actual_size = value; - } - - static size_t get_plab_actual_size(Thread* thread) { - return data(thread)->_plab_actual_size; + static ShenandoahPLAB* shenandoah_plab(Thread* thread) { + return data(thread)->_shenandoah_plab; } // Evacuation OOM handling diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp b/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp index bbb44348355..c28e572dd6b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp @@ -27,9 +27,7 @@ #include "jfr/jfrEvents.hpp" void ShenandoahTracer::report_evacuation_info(const ShenandoahCollectionSet* cset, - size_t free_regions, size_t regions_promoted_humongous, size_t regions_promoted_regular, - size_t regular_promoted_garbage, size_t regular_promoted_free, size_t regions_immediate, - size_t immediate_size) { + size_t free_regions, size_t regions_immediate, size_t immediate_size) { EventShenandoahEvacuationInformation e; if (e.should_commit()) { @@ -37,13 +35,6 @@ void ShenandoahTracer::report_evacuation_info(const ShenandoahCollectionSet* cse e.set_cSetRegions(cset->count()); e.set_cSetUsedBefore(cset->used()); e.set_cSetUsedAfter(cset->live()); - e.set_collectedOld(cset->get_live_bytes_in_old_regions()); - e.set_collectedPromoted(cset->get_live_bytes_in_tenurable_regions()); - e.set_collectedYoung(cset->get_live_bytes_in_untenurable_regions()); - e.set_regionsPromotedHumongous(regions_promoted_humongous); - e.set_regionsPromotedRegular(regions_promoted_regular); - e.set_regularPromotedGarbage(regular_promoted_garbage); - e.set_regularPromotedFree(regular_promoted_free); e.set_freeRegions(free_regions); e.set_regionsImmediate(regions_immediate); e.set_immediateBytes(immediate_size); @@ -51,3 +42,24 @@ void ShenandoahTracer::report_evacuation_info(const ShenandoahCollectionSet* cse e.commit(); } } + +void ShenandoahTracer::report_promotion_info(const ShenandoahCollectionSet* cset, + 
size_t regions_promoted_humongous, size_t humongous_promoted_garbage, size_t humongous_promoted_free, + size_t regions_promoted_regular, size_t regular_promoted_garbage, size_t regular_promoted_free) { + + EventShenandoahPromotionInformation e; + if (e.should_commit()) { + e.set_gcId(GCId::current()); + e.set_collectedOld(cset->get_live_bytes_in_old_regions()); + e.set_collectedPromoted(cset->get_live_bytes_in_tenurable_regions()); + e.set_collectedYoung(cset->get_live_bytes_in_untenurable_regions()); + e.set_regionsPromotedHumongous(regions_promoted_humongous); + e.set_humongousPromotedGarbage(humongous_promoted_garbage); + e.set_humongousPromotedFree(humongous_promoted_free); + e.set_regionsPromotedRegular(regions_promoted_regular); + e.set_regularPromotedGarbage(regular_promoted_garbage); + e.set_regularPromotedFree(regular_promoted_free); + + e.commit(); + } +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTrace.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTrace.hpp index 116968103de..e5c80e0705f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTrace.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTrace.hpp @@ -34,11 +34,14 @@ class ShenandoahTracer : public GCTracer, public CHeapObj { public: ShenandoahTracer() : GCTracer(Shenandoah) {} - // Sends a JFR event (if enabled) summarizing the composition of the collection set + // Sends a JFR event summarizing the composition of the collection set static void report_evacuation_info(const ShenandoahCollectionSet* cset, - size_t free_regions, size_t regions_promoted_humongous, size_t regions_promoted_regular, - size_t regular_promoted_garbage, size_t regular_promoted_free, size_t regions_immediate, - size_t immediate_size); + size_t free_regions, size_t regions_immediate, size_t immediate_size); + + // Sends a JFR event summarizing in-place promotion activity (generational mode only) + static void report_promotion_info(const ShenandoahCollectionSet* cset, + size_t regions_promoted_humongous, 
size_t humongous_promoted_garbage, size_t humongous_promoted_free, + size_t regions_promoted_regular, size_t regular_promoted_garbage, size_t regular_promoted_free); }; #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp index ec708b198e7..f2fcd39daf5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp @@ -32,7 +32,7 @@ ShenandoahUncommitThread::ShenandoahUncommitThread(ShenandoahHeap* heap) : _heap(heap), _uncommit_lock(Mutex::safepoint - 2, "ShenandoahUncommit_lock", true) { - set_name("Shenandoah Uncommit Thread"); + set_name("ShenUncommit"); create_and_start(); // Allow uncommits. This is managed by the control thread during a GC. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp index 176baa133c8..5af2e274833 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp @@ -40,21 +40,36 @@ ShenandoahPhaseTimings::Phase ShenandoahTimingsTracker::_current_phase = ShenandoahPhaseTimings::_invalid_phase; -ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause, ShenandoahGeneration* generation) : +const char* ShenandoahGCSession::cycle_end_message(ShenandoahGenerationType type) { + switch (type) { + case NON_GEN: + return "end of GC cycle"; + case GLOBAL: + return "end of Global GC cycle"; + case YOUNG: + return "end of Young GC cycle"; + case OLD: + return "end of Old GC cycle"; + default: + ShouldNotReachHere(); + return "end of GC cycle"; + } +} + +ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause, ShenandoahGeneration* generation, + bool is_degenerated, bool is_out_of_cycle) : _heap(ShenandoahHeap::heap()), _generation(generation), _timer(_heap->gc_timer()), _tracer(_heap->tracer()) { assert(!ShenandoahGCPhase::is_current_phase_valid(), 
"No current GC phase"); - - _heap->on_cycle_start(cause, _generation); - + _heap->on_cycle_start(cause, _generation, is_degenerated, is_out_of_cycle); _timer->register_gc_start(); _tracer->report_gc_start(cause, _timer->gc_start()); _heap->trace_heap_before_gc(_tracer); _trace_cycle.initialize(_heap->cycle_memory_manager(), cause, - "end of GC cycle", + cycle_end_message(_generation->type()), /* allMemoryPoolsAffected */ true, /* recordGCBeginTime = */ true, /* recordPreGCUsage = */ true, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp index 8a508c4afd8..1ed6e43e3e1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp @@ -67,8 +67,11 @@ private: GCTracer* const _tracer; TraceMemoryManagerStats _trace_cycle; + + static const char* cycle_end_message(ShenandoahGenerationType type); public: - ShenandoahGCSession(GCCause::Cause cause, ShenandoahGeneration* generation); + ShenandoahGCSession(GCCause::Cause cause, ShenandoahGeneration* generation, + bool is_degenerated = false, bool is_out_of_cycle = false); ~ShenandoahGCSession(); }; @@ -185,7 +188,7 @@ public: type == VM_Operation::VMOp_ShenandoahFinalMarkStartEvac || type == VM_Operation::VMOp_ShenandoahInitUpdateRefs || type == VM_Operation::VMOp_ShenandoahFinalUpdateRefs || - type == VM_Operation::VMOp_ShenandoahFinalRoots || + type == VM_Operation::VMOp_ShenandoahFinalVerify || type == VM_Operation::VMOp_ShenandoahFullGC || type == VM_Operation::VMOp_ShenandoahDegeneratedGC; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp index 6b45842f781..97dd7e5cda1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp @@ -135,12 +135,12 @@ void VM_ShenandoahFinalUpdateRefs::doit() { _gc->entry_final_update_refs(); } 
-VM_ShenandoahFinalRoots::VM_ShenandoahFinalRoots(ShenandoahConcurrentGC* gc) +VM_ShenandoahFinalVerify::VM_ShenandoahFinalVerify(ShenandoahConcurrentGC* gc) : VM_ShenandoahOperation(gc->generation()), _gc(gc) { } -void VM_ShenandoahFinalRoots::doit() { - ShenandoahGCPauseMark mark(_gc_id, "Final Roots", SvcGCMarker::CONCURRENT); +void VM_ShenandoahFinalVerify::doit() { + ShenandoahGCPauseMark mark(_gc_id, "Final Verify", SvcGCMarker::CONCURRENT); set_active_generation(); - _gc->entry_verify_final_roots(); + _gc->entry_final_verify(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp index d565a3df22c..f8b99c71b14 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp @@ -38,7 +38,7 @@ class ShenandoahFullGC; // - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation // - VM_ShenandoahInitUpdateRefs: initiate update references // - VM_ShenandoahFinalUpdateRefs: finish up update references -// - VM_ShenandoahFinalRoots: finish up roots on a non-evacuating cycle +// - VM_ShenandoahFinalVerify: final verification at the end of the cycle // - VM_ShenandoahReferenceOperation: // - VM_ShenandoahFullGC: do full GC // - VM_ShenandoahDegeneratedGC: do STW degenerated GC @@ -127,12 +127,12 @@ public: void doit() override; }; -class VM_ShenandoahFinalRoots: public VM_ShenandoahOperation { +class VM_ShenandoahFinalVerify: public VM_ShenandoahOperation { ShenandoahConcurrentGC* const _gc; public: - explicit VM_ShenandoahFinalRoots(ShenandoahConcurrentGC* gc); - VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahFinalRoots; } - const char* name() const override { return "Shenandoah Final Roots"; } + explicit VM_ShenandoahFinalVerify(ShenandoahConcurrentGC* gc); + VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahFinalVerify; } + const char* name() 
const override { return "Shenandoah Final Verify"; } void doit() override; }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 225339a3219..8299cbe62c6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -1086,7 +1086,7 @@ void ShenandoahVerifier::verify_generic(ShenandoahGeneration* generation, Verify _verify_cset_disable, // cset may be inconsistent _verify_liveness_disable, // no reliable liveness data _verify_regions_disable, // no reliable region data - _verify_size_exact, // expect generation and heap sizes to match exactly + _verify_size_disable, // no reliable sizing data _verify_gcstate_disable // no data about gcstate ); } @@ -1180,7 +1180,7 @@ void ShenandoahVerifier::verify_before_update_refs(ShenandoahGeneration* generat ); } -// We have not yet cleanup (reclaimed) the collection set +// We have not yet cleaned up (reclaimed) the collection set void ShenandoahVerifier::verify_after_update_refs(ShenandoahGeneration* generation) { verify_at_safepoint( generation, @@ -1197,6 +1197,23 @@ void ShenandoahVerifier::verify_after_update_refs(ShenandoahGeneration* generati ); } +// We have not yet cleaned up (reclaimed) the collection set +void ShenandoahVerifier::verify_after_gc(ShenandoahGeneration* generation) { + verify_at_safepoint( + generation, + "After GC", + _verify_remembered_disable, // do not verify remembered set + _verify_forwarded_none, // no forwarded references + _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well + _verify_cset_none, // no cset references, all updated + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_nocset, // no cset regions, trash regions have appeared + // expect generation and heap sizes to match exactly, including trash + _verify_size_exact_including_trash, + _verify_gcstate_stable // GC state 
was turned off + ); +} + void ShenandoahVerifier::verify_after_degenerated(ShenandoahGeneration* generation) { verify_at_safepoint( generation, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp index 7e683cf7af8..0479d5f67ce 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -220,6 +220,7 @@ public: void verify_before_evacuation(ShenandoahGeneration* generation); void verify_before_update_refs(ShenandoahGeneration* generation); void verify_after_update_refs(ShenandoahGeneration* generation); + void verify_after_gc(ShenandoahGeneration* generation); void verify_before_fullgc(ShenandoahGeneration* generation); void verify_after_fullgc(ShenandoahGeneration* generation); void verify_after_degenerated(ShenandoahGeneration* generation); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp index f00ce16136f..7a76bc50078 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp @@ -131,6 +131,14 @@ size_t ShenandoahYoungGeneration::free_unaffiliated_regions() const { return _free_set->young_unaffiliated_regions(); } +size_t ShenandoahYoungGeneration::available_with_reserve() const { + shenandoah_assert_heaplocked(); + ShenandoahFreeSet* free_set = ShenandoahHeap::heap()->free_set(); + size_t mutator_available = free_set->available_locked(); + size_t collector_available = free_set->collector_available_locked(); + return mutator_available + collector_available; +} + size_t ShenandoahYoungGeneration::available() const { // The collector reserve may eat into what the mutator is allowed to use. Make sure we are looking // at what is available to the mutator when reporting how much memory is available. 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp index 930c5ff1747..c3b6944ec80 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp @@ -82,6 +82,8 @@ public: size_t get_affiliated_region_count() const override; size_t max_capacity() const override; + // Return sum of bytes available to mutator and to Collector, assuming heap lock is held. + size_t available_with_reserve() const; size_t available() const override; size_t soft_mutator_available() const override; diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 3eb1a06a911..2c5ba726ef2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -34,6 +34,59 @@ range, \ constraint) \ \ + product(uint, ShenandoahAccelerationSamplePeriod, 15, EXPERIMENTAL, \ + "When at least this much time (measured in ms) has passed " \ + "since the acceleration allocation rate was most recently " \ + "sampled, capture another allocation rate sample for the purpose "\ + "of detecting acceleration or momentary spikes in allocation " \ + "rate. A smaller value allows quicker response to changes in " \ + "allocation rates but is more vulnerable to noise and requires " \ + "more monitoring effort.") \ + range(1, 1000) \ + \ + product(uint, ShenandoahRateAccelerationSampleSize, 8, EXPERIMENTAL, \ + "In selected ShenandoahControlIntervals " \ + "(if ShenandoahAccelerationSamplePeriod ms have passed " \ + "since previous allocation rate sample), " \ + "we compute the allocation rate since the previous rate was " \ + "sampled. This many samples are analyzed to determine whether " \ + "allocation rates are accelerating. 
Acceleration may occur " \ + "due to increasing client demand or due to phase changes in " \ + "an application. A larger value reduces sensitivity to " \ + "noise and delays recognition of the accelerating trend. A " \ + "larger value may also cause the heuristic to miss detection " \ + "of very quick accelerations. Smaller values may cause random " \ + "noise to be perceived as acceleration of allocation rate, " \ + "triggering excess collections. Note that the acceleration " \ + "need not last the entire span of the sampled duration to be " \ + "detected. If the last several of all samples are significantly " \ + "larger than the other samples, the best fit line through all " \ + "sampled values will have an upward slope, manifesting as " \ + "acceleration.") \ + range(1,64) \ + \ + product(uint, ShenandoahMomentaryAllocationRateSpikeSampleSize, \ + 2, EXPERIMENTAL, \ + "In selected ShenandoahControlIntervals " \ + "(if ShenandoahAccelerationSamplePeriod ms have passed " \ + "since previous allocation rate sample), we compute " \ + "the allocation rate since the previous rate was sampled. " \ + "The weighted average of this " \ + "many most recent momentary allocation rate samples is compared " \ + "against current allocation runway and anticipated GC time to " \ + "determine whether a spike in momentary allocation rate " \ + "justifies an early GC trigger. Momentary allocation spike " \ + "detection is in addition to previously implemented " \ + "ShenandoahAdaptiveInitialSpikeThreshold, the latter of which " \ + "is more effective at detecting slower spikes. The latter " \ + "spike detection samples at the rate specified by " \ + "ShenandoahAdaptiveSampleFrequencyHz. The value of this " \ + "parameter must be less than the value of " \ + "ShenandoahRateAccelerationSampleSize. A larger value makes " \ + "momentary spike detection less sensitive. 
A smaller value " \ + "may result in excessive GC triggers.") \ + range(1,64) \ + \ product(uintx, ShenandoahGenerationalMinPIPUsage, 30, EXPERIMENTAL, \ "(Generational mode only) What percent of a heap region " \ "should be used before we consider promoting a region in " \ @@ -99,9 +152,6 @@ "evvort even if the usage of old generation is below " \ "ShenandoahIgnoreOldGrowthBelowPercentage.") \ \ - product(bool, ShenandoahGenerationalAdaptiveTenuring, true, EXPERIMENTAL, \ - "(Generational mode only) Dynamically adapt tenuring age.") \ - \ product(bool, ShenandoahGenerationalCensusIgnoreOlderCohorts, true, \ EXPERIMENTAL,\ "(Generational mode only) Ignore mortality rates older than the " \ @@ -126,8 +176,7 @@ "(Generational mode only) Cohort mortality rates below this " \ "value will be treated as indicative of longevity, leading to " \ "tenuring. A lower value delays tenuring, a higher value hastens "\ - "it. Used only when ShenandoahGenerationalhenAdaptiveTenuring is "\ - "enabled.") \ + "it.") \ range(0.001,0.999) \ \ product(size_t, ShenandoahGenerationalTenuringCohortPopulationThreshold, \ @@ -511,9 +560,6 @@ product(bool, ShenandoahLoadRefBarrier, true, DIAGNOSTIC, \ "Turn on/off load-reference barriers in Shenandoah") \ \ - product(bool, ShenandoahStackWatermarkBarrier, true, DIAGNOSTIC, \ - "Turn on/off stack watermark barriers in Shenandoah") \ - \ develop(bool, ShenandoahVerifyOptoBarriers, trueInDebug, \ "Verify no missing barriers in C2.") \ \ diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp index 650918e2d30..0a3dac1e100 100644 --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -442,7 +442,7 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a assert(src_offset == dest_offset, "should be equal"); const jlong offset = src_offset->get_long(); if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { - assert(!UseCompressedClassPointers || UseCompactObjectHeaders, "should only happen without compressed class pointers"); + assert(UseCompactObjectHeaders, "should only happen with COH"); assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT)); diff --git a/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp index bcc757d6132..7b15813678a 100644 --- a/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp +++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp @@ -34,6 +34,9 @@ public: static Address load_bad_mask_from_jni_env(Register env); static Address mark_bad_mask_from_jni_env(Register env); + + virtual void register_reloc_addresses(GrowableArray
<address>&entries, int begin, int count) { } + virtual void retrieve_reloc_addresses(address start, address end, GrowableArray<address>
&entries) { } }; // Needs to be included after definition of ZBarrierSetAssemblerBase diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp index d80ce4e149d..a439b3a167b 100644 --- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp @@ -33,6 +33,7 @@ #include "gc/z/zThreadLocalData.hpp" #include "gc/z/zUncoloredRoot.inline.hpp" #include "logging/log.hpp" +#include "runtime/icache.hpp" #include "runtime/threadWXSetters.inline.hpp" bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { @@ -70,12 +71,15 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { return false; } - // Heal barriers - ZNMethod::nmethod_patch_barriers(nm); + { + ICacheInvalidationContext icic; + // Heal barriers + ZNMethod::nmethod_patch_barriers(nm, &icic); - // Heal oops - ZUncoloredRootProcessWeakOopClosure cl(ZNMethod::color(nm)); - ZNMethod::nmethod_oops_do_inner(nm, &cl); + // Heal oops + ZUncoloredRootProcessWeakOopClosure cl(ZNMethod::color(nm)); + ZNMethod::nmethod_oops_do_inner(nm, &cl, &icic); + } const uintptr_t prev_color = ZNMethod::color(nm); const uintptr_t new_color = *ZPointerStoreGoodMaskLowOrderBitsAddr; diff --git a/src/hotspot/share/gc/z/zBitField.hpp b/src/hotspot/share/gc/z/zBitField.hpp index 9bec4e05594..b68f5b92ce1 100644 --- a/src/hotspot/share/gc/z/zBitField.hpp +++ b/src/hotspot/share/gc/z/zBitField.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,7 @@ public: static ContainerType encode(ValueType value) { assert(((ContainerType)value & (FieldMask << ValueShift)) == (ContainerType)value, "Invalid value"); - return ((ContainerType)value >> ValueShift) << FieldShift; + return checked_cast(((ContainerType)value >> ValueShift) << FieldShift); } }; diff --git a/src/hotspot/share/gc/z/zDebug.gdb b/src/hotspot/share/gc/z/zDebug.gdb index d502eea7ce3..df087c4a42d 100644 --- a/src/hotspot/share/gc/z/zDebug.gdb +++ b/src/hotspot/share/gc/z/zDebug.gdb @@ -50,11 +50,7 @@ define zpo end printf "\t Page: %llu\n", ((uintptr_t)$obj & ZAddressOffsetMask) >> ZGranuleSizeShift x/16gx $obj - if (UseCompressedClassPointers) - set $klass = (Klass*)(void*)((uintptr_t)CompressedKlassPointers::_base +((uintptr_t)$obj->_metadata->_compressed_klass << CompressedKlassPointers::_shift)) - else - set $klass = $obj->_metadata->_klass - end + set $klass = (Klass*)(void*)((uintptr_t)CompressedKlassPointers::_base +((uintptr_t)$obj->_compressed_klass << CompressedKlassPointers::_shift)) printf "Mark: 0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$klass->_name->_body end diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index 27f352a624f..0f9f4e34a5e 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -58,6 +58,7 @@ #include "prims/jvmtiTagMap.hpp" #include "runtime/continuation.hpp" #include "runtime/handshake.hpp" +#include "runtime/icache.hpp" #include "runtime/safepoint.hpp" #include "runtime/threads.hpp" #include "runtime/vmOperations.hpp" @@ -1434,12 +1435,15 @@ public: virtual void do_nmethod(nmethod* nm) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); if (_bs_nm->is_armed(nm)) { - // Heal barriers - ZNMethod::nmethod_patch_barriers(nm); + { + ICacheInvalidationContext icic; + // Heal barriers + ZNMethod::nmethod_patch_barriers(nm, &icic); - // Heal 
oops - ZUncoloredRootProcessOopClosure cl(ZNMethod::color(nm)); - ZNMethod::nmethod_oops_do_inner(nm, &cl); + // Heal oops + ZUncoloredRootProcessOopClosure cl(ZNMethod::color(nm)); + ZNMethod::nmethod_oops_do_inner(nm, &cl, &icic); + } log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by old remapping", p2i(nm)); diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp index 03701ae9998..ac7d86db240 100644 --- a/src/hotspot/share/gc/z/zMark.cpp +++ b/src/hotspot/share/gc/z/zMark.cpp @@ -59,6 +59,7 @@ #include "oops/oop.inline.hpp" #include "runtime/continuation.hpp" #include "runtime/handshake.hpp" +#include "runtime/icache.hpp" #include "runtime/javaThread.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/safepointMechanism.hpp" @@ -718,12 +719,15 @@ public: virtual void do_nmethod(nmethod* nm) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); if (_bs_nm->is_armed(nm)) { - // Heal barriers - ZNMethod::nmethod_patch_barriers(nm); + { + ICacheInvalidationContext icic; + // Heal barriers + ZNMethod::nmethod_patch_barriers(nm, &icic); - // Heal oops - ZUncoloredRootMarkOopClosure cl(ZNMethod::color(nm)); - ZNMethod::nmethod_oops_do_inner(nm, &cl); + // Heal oops + ZUncoloredRootMarkOopClosure cl(ZNMethod::color(nm)); + ZNMethod::nmethod_oops_do_inner(nm, &cl, &icic); + } // CodeCache unloading support nm->mark_as_maybe_on_stack(); @@ -753,10 +757,6 @@ public: if (_bs_nm->is_armed(nm)) { const uintptr_t prev_color = ZNMethod::color(nm); - // Heal oops - ZUncoloredRootMarkYoungOopClosure cl(prev_color); - ZNMethod::nmethod_oops_do_inner(nm, &cl); - // Disarm only the young marking, not any potential old marking cycle const uintptr_t old_marked_mask = ZPointerMarkedMask ^ (ZPointerMarkedYoung0 | ZPointerMarkedYoung1); @@ -767,9 +767,16 @@ public: // Check if disarming for young mark, completely disarms the nmethod entry barrier const bool complete_disarm = ZPointer::is_store_good(new_disarm_value_ptr); - if (complete_disarm) { - 
// We are about to completely disarm the nmethod, must take responsibility to patch all barriers before disarming - ZNMethod::nmethod_patch_barriers(nm); + { + ICacheInvalidationContext icic; + if (complete_disarm) { + // We are about to completely disarm the nmethod, must take responsibility to patch all barriers before disarming + ZNMethod::nmethod_patch_barriers(nm, &icic); + } + + // Heal oops + ZUncoloredRootMarkYoungOopClosure cl(prev_color); + ZNMethod::nmethod_oops_do_inner(nm, &cl, &icic); } _bs_nm->guard_with(nm, (int)untype(new_disarm_value_ptr)); diff --git a/src/hotspot/share/gc/z/zNMT.cpp b/src/hotspot/share/gc/z/zNMT.cpp index 1019bcfdd96..b82afac47bc 100644 --- a/src/hotspot/share/gc/z/zNMT.cpp +++ b/src/hotspot/share/gc/z/zNMT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,9 +45,7 @@ void ZNMT::unreserve(zaddress_unsafe start, size_t size) { if (MemTracker::enabled()) { // We are the owner of the reserved memory, and any failure to unreserve - // are fatal, so so we don't need to hold a lock while unreserving memory. - - MemTracker::NmtVirtualMemoryLocker nvml; + // are fatal, so we don't need to hold a lock while unreserving memory. // The current NMT implementation does not support unreserving a memory // region that was built up from smaller memory reservations. 
Workaround diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index 780bc9e3bf7..a1348b63b6f 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -50,6 +50,7 @@ #include "oops/oop.inline.hpp" #include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" +#include "runtime/icache.hpp" #include "utilities/debug.hpp" static ZNMethodData* gc_data(const nmethod* nm) { @@ -245,8 +246,16 @@ void ZNMethod::set_guard_value(nmethod* nm, int value) { } void ZNMethod::nmethod_patch_barriers(nmethod* nm) { + ICacheInvalidationContext icic; + nmethod_patch_barriers(nm, &icic); +} + +void ZNMethod::nmethod_patch_barriers(nmethod* nm, ICacheInvalidationContext* icic) { ZBarrierSetAssembler* const bs_asm = ZBarrierSet::assembler(); ZArrayIterator iter(gc_data(nm)->barriers()); + if (gc_data(nm)->barriers()->is_nonempty()) { + icic->set_has_modified_code(); + } for (ZNMethodDataBarrier barrier; iter.next(&barrier);) { bs_asm->patch_barrier_relocation(barrier._reloc_addr, barrier._reloc_format); } @@ -258,6 +267,11 @@ void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) { } void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { + ICacheInvalidationContext icic; + nmethod_oops_do_inner(nm, cl, &icic); +} + +void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl, ICacheInvalidationContext* icic) { // Process oops table { oop* const begin = nm->oops_begin(); @@ -283,7 +297,7 @@ void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { // Process non-immediate oops if (data->has_non_immediate_oops()) { - nm->fix_oop_relocations(); + nm->fix_oop_relocations(icic); } } diff --git a/src/hotspot/share/gc/z/zNMethod.hpp b/src/hotspot/share/gc/z/zNMethod.hpp index 865ea11e7b9..2779151c576 100644 --- a/src/hotspot/share/gc/z/zNMethod.hpp +++ b/src/hotspot/share/gc/z/zNMethod.hpp @@ -56,9 +56,11 @@ public: static void set_guard_value(nmethod* nm, int value); static void 
nmethod_patch_barriers(nmethod* nm); + static void nmethod_patch_barriers(nmethod* nm, ICacheInvalidationContext* icic); static void nmethod_oops_do(nmethod* nm, OopClosure* cl); static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl); + static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl, ICacheInvalidationContext* icic); static void nmethods_do_begin(bool secondary); static void nmethods_do_end(bool secondary); diff --git a/src/hotspot/share/interpreter/bytecodeTracer.cpp b/src/hotspot/share/interpreter/bytecodeTracer.cpp index 21974218957..4578a3eec4e 100644 --- a/src/hotspot/share/interpreter/bytecodeTracer.cpp +++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp @@ -52,9 +52,9 @@ class BytecodePrinter { Bytecodes::Code _code; address _next_pc; // current decoding position int _flags; - bool _is_linked; + bool _use_cp_cache; - bool is_linked() const { return _is_linked; } + bool use_cp_cache() const { return _use_cp_cache; } void align() { _next_pc = align_up(_next_pc, sizeof(jint)); } int get_byte() { return *(jbyte*) _next_pc++; } // signed int get_index_u1() { return *(address)_next_pc++; } // returns 0x00 - 0xff as an int @@ -69,7 +69,7 @@ class BytecodePrinter { bool is_wide() const { return _is_wide; } Bytecodes::Code raw_code() const { return Bytecodes::Code(_code); } ConstantPool* constants() const { return method()->constants(); } - ConstantPoolCache* cpcache() const { assert(is_linked(), "must be"); return constants()->cache(); } + ConstantPoolCache* cpcache() const { assert(use_cp_cache(), "must be"); return constants()->cache(); } void print_constant(int i, outputStream* st); void print_cpcache_entry(int cpc_index, outputStream* st); @@ -78,7 +78,7 @@ class BytecodePrinter { void print_field_or_method(int cp_index, outputStream* st); void print_dynamic(int cp_index, outputStream* st); void print_attributes(int bci, outputStream* st); - void bytecode_epilog(int bci, outputStream* st); + void print_method_data_at(int bci, 
outputStream* st); public: BytecodePrinter(int flags = 0) : _is_wide(false), _code(Bytecodes::_illegal), _flags(flags) {} @@ -94,8 +94,9 @@ class BytecodePrinter { ResourceMark rm; bool method_changed = _current_method != method(); _current_method = method(); - _is_linked = method->method_holder()->is_linked(); - assert(_is_linked, "this function must be called on methods that are already executing"); + _use_cp_cache = method->constants()->cache() != nullptr; + assert(method->method_holder()->is_linked(), + "this function must be called on methods that are already executing"); if (method_changed) { // Note 1: This code will not work as expected with true MT/MP. @@ -150,7 +151,8 @@ class BytecodePrinter { // BytecodeStream, which will skip wide bytecodes. void trace(const methodHandle& method, address bcp, outputStream* st) { _current_method = method(); - _is_linked = method->method_holder()->is_linked(); + // This may be called during linking after bytecodes are rewritten to point to the cpCache. + _use_cp_cache = method->constants()->cache() != nullptr; ResourceMark rm; Bytecodes::Code code = Bytecodes::code_at(method(), bcp); // Set is_wide @@ -171,7 +173,9 @@ class BytecodePrinter { } _next_pc = is_wide() ? 
bcp+2 : bcp+1; print_attributes(bci, st); - bytecode_epilog(bci, st); + if (ClassPrinter::has_mode(_flags, ClassPrinter::PRINT_METHOD_DATA)) { + print_method_data_at(bci, st); + } } }; @@ -299,7 +303,7 @@ void BytecodePrinter::print_invokedynamic(int indy_index, int cp_index, outputSt if (ClassPrinter::has_mode(_flags, ClassPrinter::PRINT_DYNAMIC)) { print_bsm(cp_index, st); - if (is_linked()) { + if (use_cp_cache()) { ResolvedIndyEntry* indy_entry = constants()->resolved_indy_entry_at(indy_index); st->print(" ResolvedIndyEntry: "); indy_entry->print_on(st); @@ -363,7 +367,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { { int cp_index; if (Bytecodes::uses_cp_cache(raw_code())) { - assert(is_linked(), "fast ldc bytecode must be in linked classes"); + assert(use_cp_cache(), "fast ldc bytecode must be in linked classes"); int obj_index = get_index_u1(); cp_index = constants()->object_to_cp_index(obj_index); } else { @@ -378,7 +382,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { { int cp_index; if (Bytecodes::uses_cp_cache(raw_code())) { - assert(is_linked(), "fast ldc bytecode must be in linked classes"); + assert(use_cp_cache(), "fast ldc bytecode must be in linked classes"); int obj_index = get_native_index_u2(); cp_index = constants()->object_to_cp_index(obj_index); } else { @@ -508,7 +512,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { case Bytecodes::_getfield: { int cp_index; - if (is_linked()) { + if (use_cp_cache()) { int field_index = get_native_index_u2(); cp_index = cpcache()->resolved_field_entry_at(field_index)->constant_pool_index(); } else { @@ -523,7 +527,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { case Bytecodes::_invokestatic: { int cp_index; - if (is_linked()) { + if (use_cp_cache()) { int method_index = get_native_index_u2(); ResolvedMethodEntry* method_entry = cpcache()->resolved_method_entry_at(method_index); cp_index = 
method_entry->constant_pool_index(); @@ -531,7 +535,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { if (raw_code() == Bytecodes::_invokehandle && ClassPrinter::has_mode(_flags, ClassPrinter::PRINT_METHOD_HANDLE)) { - assert(is_linked(), "invokehandle is only in rewritten methods"); + assert(use_cp_cache(), "invokehandle is only in rewritten methods"); method_entry->print_on(st); if (method_entry->has_appendix()) { st->print(" appendix: "); @@ -548,7 +552,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { case Bytecodes::_invokeinterface: { int cp_index; - if (is_linked()) { + if (use_cp_cache()) { int method_index = get_native_index_u2(); cp_index = cpcache()->resolved_method_entry_at(method_index)->constant_pool_index(); } else { @@ -564,7 +568,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { { int indy_index; int cp_index; - if (is_linked()) { + if (use_cp_cache()) { indy_index = get_native_index_u4(); cp_index = constants()->resolved_indy_entry_at(indy_index)->constant_pool_index(); } else { @@ -598,7 +602,7 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { } -void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) { +void BytecodePrinter::print_method_data_at(int bci, outputStream* st) { MethodData* mdo = method()->method_data(); if (mdo != nullptr) { diff --git a/src/hotspot/share/interpreter/bytecodes.cpp b/src/hotspot/share/interpreter/bytecodes.cpp index 1526b3c330e..a7914b6b93a 100644 --- a/src/hotspot/share/interpreter/bytecodes.cpp +++ b/src/hotspot/share/interpreter/bytecodes.cpp @@ -402,7 +402,7 @@ int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end) case _fast_binaryswitch: // fall through case _fast_linearswitch: { address aligned_bcp = align_up(bcp + 1, jintSize); - if (end != nullptr && aligned_bcp + 2*jintSize >= end) { + if (end != nullptr && aligned_bcp + 2*jintSize > end) { return -1; // don't read past end of code 
buffer } // Promote calculation to 64 bits to do range checks, used by the verifier. diff --git a/src/hotspot/share/interpreter/interpreter.cpp b/src/hotspot/share/interpreter/interpreter.cpp index 2cc163186e8..1f327152e0c 100644 --- a/src/hotspot/share/interpreter/interpreter.cpp +++ b/src/hotspot/share/interpreter/interpreter.cpp @@ -61,10 +61,10 @@ void InterpreterCodelet::initialize(const char* description, Bytecodes::Code byt void InterpreterCodelet::verify() {} -void InterpreterCodelet::print_on(outputStream* st) const { +void InterpreterCodelet::print_on(outputStream* st, bool print_code) const { ttyLocker ttyl; - if (AbstractInterpreter::should_print_instructions()) { + if (print_code) { st->cr(); st->print_cr("----------------------------------------------------------------------"); } @@ -74,12 +74,16 @@ void InterpreterCodelet::print_on(outputStream* st) const { st->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "] %d bytes", p2i(code_begin()), p2i(code_end()), code_size()); - if (AbstractInterpreter::should_print_instructions()) { + if (print_code) { st->cr(); Disassembler::decode(code_begin(), code_end(), st NOT_PRODUCT(COMMA &_asm_remarks)); } } +void InterpreterCodelet::print_on(outputStream* st) const { + print_on(st, AbstractInterpreter::should_print_instructions()); +} + void InterpreterCodelet::print() const { print_on(tty); } CodeletMark::CodeletMark(InterpreterMacroAssembler*& masm, diff --git a/src/hotspot/share/interpreter/interpreter.hpp b/src/hotspot/share/interpreter/interpreter.hpp index f7d42fcb4da..fb368638332 100644 --- a/src/hotspot/share/interpreter/interpreter.hpp +++ b/src/hotspot/share/interpreter/interpreter.hpp @@ -67,6 +67,7 @@ class InterpreterCodelet: public Stub { // Debugging void verify(); + void print_on(outputStream* st, bool print_code) const; void print_on(outputStream* st) const; void print() const; diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp 
index e3cf5d589c2..cd0a062ebc8 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -36,6 +36,7 @@ #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "interpreter/linkResolver.hpp" +#include "interpreter/oopMapCache.hpp" #include "interpreter/templateTable.hpp" #include "jvm_io.h" #include "logging/log.hpp" @@ -55,7 +56,6 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" -#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" @@ -75,6 +75,7 @@ #include "utilities/checkedCast.hpp" #include "utilities/copy.hpp" #include "utilities/events.hpp" +#include "utilities/exceptions.hpp" #if INCLUDE_JFR #include "jfr/jfr.inline.hpp" #endif @@ -243,9 +244,9 @@ JRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* current, jint* fi // We may want to pass in more arguments - could make this slightly faster LastFrameAccessor last_frame(current); ConstantPool* constants = last_frame.method()->constants(); - int i = last_frame.get_index_u2(Bytecodes::_multianewarray); - Klass* klass = constants->klass_at(i, CHECK); - int nof_dims = last_frame.number_of_dimensions(); + int i = last_frame.get_index_u2(Bytecodes::_multianewarray); + Klass* klass = constants->klass_at(i, CHECK); + int nof_dims = last_frame.number_of_dimensions(); assert(klass->is_klass(), "not a class"); assert(nof_dims >= 1, "multianewarray rank must be nonzero"); @@ -352,7 +353,7 @@ JRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* current vmClasses::StackOverflowError_klass(), CHECK); // Increment counter for hs_err file reporting - AtomicAccess::inc(&Exceptions::_stack_overflow_errors); + Exceptions::increment_stack_overflow_errors(); // Remove the ScopedValue bindings in case we got a StackOverflowError // while 
we were trying to manipulate ScopedValue bindings. current->clear_scopedValueBindings(); @@ -366,7 +367,7 @@ JRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* java_lang_Throwable::set_message(exception(), Universe::delayed_stack_overflow_error_message()); // Increment counter for hs_err file reporting - AtomicAccess::inc(&Exceptions::_stack_overflow_errors); + Exceptions::increment_stack_overflow_errors(); // Remove the ScopedValue bindings in case we got a StackOverflowError // while we were trying to manipulate ScopedValue bindings. current->clear_scopedValueBindings(); @@ -756,12 +757,10 @@ JRT_LEAF(void, InterpreterRuntime::monitorexit(BasicObjectLock* elem)) elem->set_obj(nullptr); JRT_END - JRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* current)) THROW(vmSymbols::java_lang_IllegalMonitorStateException()); JRT_END - JRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* current)) // Returns an illegal exception to install into the current thread. 
The // pending_exception flag is cleared so normal exception handling does not @@ -1529,4 +1528,17 @@ bool InterpreterRuntime::is_preemptable_call(address entry_point) { entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache) || entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::_new); } + +void InterpreterRuntime::generate_oop_map_alot() { + JavaThread* current = JavaThread::current(); + LastFrameAccessor last_frame(current); + if (last_frame.is_interpreted_frame()) { + ResourceMark rm(current); + InterpreterOopMap mask; + methodHandle mh(current, last_frame.method()); + int bci = last_frame.bci(); + log_info(generateoopmap)("Generating oopmap for method %s at bci %d", mh->name_and_sig_as_C_string(), bci); + OopMapCache::compute_one_oop_map(mh, bci, &mask); + } +} #endif // ASSERT diff --git a/src/hotspot/share/interpreter/interpreterRuntime.hpp b/src/hotspot/share/interpreter/interpreterRuntime.hpp index 70ceeb0b2af..3228027fa93 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.hpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -173,6 +173,8 @@ private: // Virtual Thread Preemption DEBUG_ONLY(static bool is_preemptable_call(address entry_point);) + + DEBUG_ONLY(static void generate_oop_map_alot();) }; diff --git a/src/hotspot/share/interpreter/linkResolver.cpp b/src/hotspot/share/interpreter/linkResolver.cpp index c82398b654c..25fff580c9d 100644 --- a/src/hotspot/share/interpreter/linkResolver.cpp +++ b/src/hotspot/share/interpreter/linkResolver.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1045,7 +1045,7 @@ void LinkResolver::resolve_field(fieldDescriptor& fd, stringStream ss; ss.print("Update to %s final field %s.%s attempted from a different class (%s) than the field's declaring class", is_static ? "static" : "non-static", resolved_klass->external_name(), fd.name()->as_C_string(), - current_klass->external_name()); + current_klass->external_name()); THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string()); } @@ -1260,7 +1260,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle sel_method(THREAD, resolved_method()); if (link_info.check_access() && - // check if the method is not + // check if the method is not , which is never inherited resolved_method->name() != vmSymbols::object_initializer_name()) { Klass* current_klass = link_info.current_klass(); @@ -1724,8 +1724,8 @@ void LinkResolver::resolve_invoke(CallInfo& result, Handle recv, const constantP } void LinkResolver::resolve_invoke(CallInfo& result, Handle& recv, - const methodHandle& attached_method, - Bytecodes::Code byte, TRAPS) { + const methodHandle& attached_method, + Bytecodes::Code byte, TRAPS) { Klass* defc = attached_method->method_holder(); Symbol* name = attached_method->name(); Symbol* type = attached_method->signature(); diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp index 29d6825d3e5..af45f7f9bed 100644 --- a/src/hotspot/share/interpreter/oopMapCache.cpp +++ b/src/hotspot/share/interpreter/oopMapCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -78,14 +78,10 @@ class OopMapForCacheEntry: public GenerateOopMap { int _stack_top; virtual bool report_results() const { return false; } - virtual bool possible_gc_point (BytecodeStream *bcs); - virtual void fill_stackmap_prolog (int nof_gc_points); - virtual void fill_stackmap_epilog (); virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs, CellTypeState* vars, CellTypeState* stack, int stack_top); - virtual void fill_init_vars (GrowableArray *init_vars); public: OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry); @@ -96,11 +92,8 @@ class OopMapForCacheEntry: public GenerateOopMap { }; -OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) { - _bci = bci; - _entry = entry; - _stack_top = -1; -} +OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : + GenerateOopMap(method, /*all_exception_edges*/ true), _entry(entry), _bci(bci), _stack_top(-1) { } bool OopMapForCacheEntry::compute_map(Thread* current) { @@ -111,6 +104,11 @@ bool OopMapForCacheEntry::compute_map(Thread* current) { } else { ResourceMark rm; if (!GenerateOopMap::compute_map(current)) { + // If compute_map fails, print the exception message, which is generated if + // this is a JavaThread, otherwise compute_map calls fatal so we don't get here. + if (exception() != nullptr) { + exception()->print(); + } fatal("Unrecoverable verification or out-of-memory error"); return false; } @@ -120,26 +118,6 @@ bool OopMapForCacheEntry::compute_map(Thread* current) { } -bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) { - return false; // We are not reporting any result. 
We call result_for_basicblock directly -} - - -void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) { - // Do nothing -} - - -void OopMapForCacheEntry::fill_stackmap_epilog() { - // Do nothing -} - - -void OopMapForCacheEntry::fill_init_vars(GrowableArray *init_vars) { - // Do nothing -} - - void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs, CellTypeState* vars, CellTypeState* stack, @@ -280,8 +258,8 @@ bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, in if (log) st.print("Locals (%d): ", max_locals); for(int i = 0; i < max_locals; i++) { - bool v1 = is_oop(i) ? true : false; - bool v2 = vars[i].is_reference() ? true : false; + bool v1 = is_oop(i); + bool v2 = vars[i].is_reference(); assert(v1 == v2, "locals oop mask generation error"); if (log) st.print("%d", v1 ? 1 : 0); } @@ -289,8 +267,8 @@ bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, in if (log) st.print("Stack (%d): ", stack_top); for(int j = 0; j < stack_top; j++) { - bool v1 = is_oop(max_locals + j) ? true : false; - bool v2 = stack[j].is_reference() ? true : false; + bool v1 = is_oop(max_locals + j); + bool v2 = stack[j].is_reference(); assert(v1 == v2, "stack oop mask generation error"); if (log) st.print("%d", v1 ? 
1 : 0); } @@ -339,6 +317,9 @@ void OopMapCacheEntry::fill(const methodHandle& method, int bci) { } else { OopMapForCacheEntry gen(method, bci, this); if (!gen.compute_map(Thread::current())) { + if (gen.exception() != nullptr) { + gen.exception()->print(); + } fatal("Unrecoverable verification or out-of-memory error"); } } @@ -374,7 +355,7 @@ void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int s } // set oop bit - if ( cell->is_reference()) { + if (cell->is_reference()) { value |= (mask << oop_bit_number ); _num_oops++; } diff --git a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp index 59a9d8a9090..a6e97ab227a 100644 --- a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp +++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp @@ -228,7 +228,7 @@ void JfrJavaSupport::new_object_global_ref(JfrJavaArguments* args, TRAPS) { jstring JfrJavaSupport::new_string(const char* c_str, TRAPS) { assert(c_str != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(THREAD)); - const oop result = java_lang_String::create_oop_from_str(c_str, THREAD); + const oop result = java_lang_String::create_oop_from_str(c_str, CHECK_NULL); return (jstring)local_jni_handle(result, THREAD); } diff --git a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp index 885484020bd..0183bf634f6 100644 --- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp +++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp @@ -410,6 +410,15 @@ JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm)) return static_cast(total_swap_space); JVM_END +JVM_ENTRY_NO_ENV(jlong, jfr_host_memory_usage(JNIEnv* env, jclass jvm)) + physical_memory_size_type memory_usage = 0; + if (!os::Machine::used_memory(memory_usage)) { + // Return -1 to signal failure to get memory usage. 
+ return static_cast(-1); + } + return static_cast(memory_usage); +JVM_END + JVM_ENTRY_NO_ENV(void, jfr_emit_data_loss(JNIEnv* env, jclass jvm, jlong bytes)) EventDataLoss::commit(bytes, min_jlong); JVM_END diff --git a/src/hotspot/share/jfr/jni/jfrJniMethod.hpp b/src/hotspot/share/jfr/jni/jfrJniMethod.hpp index 9769df57bd3..bcdaf7a99b7 100644 --- a/src/hotspot/share/jfr/jni/jfrJniMethod.hpp +++ b/src/hotspot/share/jfr/jni/jfrJniMethod.hpp @@ -163,6 +163,8 @@ jlong JNICALL jfr_host_total_memory(JNIEnv* env, jclass jvm); jlong JNICALL jfr_host_total_swap_memory(JNIEnv* env, jclass jvm); +jlong JNICALL jfr_host_memory_usage(JNIEnv* env, jclass jvm); + void JNICALL jfr_emit_data_loss(JNIEnv* env, jclass jvm, jlong bytes); jlong JNICALL jfr_register_stack_filter(JNIEnv* env, jclass jvm, jobjectArray classes, jobjectArray methods); diff --git a/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp b/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp index 2979f5c5c2d..0813289e840 100644 --- a/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp +++ b/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp @@ -101,6 +101,7 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) { (char*)"isContainerized", (char*)"()Z", (void*) jfr_is_containerized, (char*)"hostTotalMemory", (char*)"()J", (void*) jfr_host_total_memory, (char*)"hostTotalSwapMemory", (char*)"()J", (void*) jfr_host_total_swap_memory, + (char*)"hostMemoryUsage", (char*)"()J", (void*) jfr_host_memory_usage, (char*)"emitDataLoss", (char*)"(J)V", (void*)jfr_emit_data_loss, (char*)"registerStackFilter", (char*)"([Ljava/lang/String;[Ljava/lang/String;)J", (void*)jfr_register_stack_filter, (char*)"unregisterStackFilter", (char*)"(J)V", (void*)jfr_unregister_stack_filter, diff --git a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp index 83eee96091e..8b5819e92c4 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp 
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp @@ -34,6 +34,7 @@ #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/os.hpp" #include "utilities/align.hpp" UnifiedOopRef DFSClosure::_reference_stack[max_dfs_depth]; @@ -67,9 +68,27 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store, rs.process(); } +static address calculate_headroom_limit() { + static constexpr size_t required_headroom = K * 64; + const Thread* const t = Thread::current_or_null(); + return t->stack_end() + required_headroom; +} + DFSClosure::DFSClosure(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* start_edge) :_edge_store(edge_store), _mark_bits(mark_bits), _start_edge(start_edge), - _max_depth(max_dfs_depth), _depth(0), _ignore_root_set(false) { + _max_depth(max_dfs_depth), _depth(0), _ignore_root_set(false), + _headroom_limit(calculate_headroom_limit()) { +} + +bool DFSClosure::have_headroom() const { + const address sp = (address) os::current_stack_pointer(); +#ifdef ASSERT + const Thread* const t = Thread::current_or_null(); + assert(t->is_VM_thread(), "invariant"); + assert(t->is_in_full_stack(_headroom_limit), "invariant"); + assert(t->is_in_full_stack(sp), "invariant"); +#endif + return sp > _headroom_limit; } void DFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) { @@ -97,7 +116,7 @@ void DFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) { } } assert(_max_depth >= 1, "invariant"); - if (_depth < _max_depth - 1) { + if (_depth < _max_depth - 1 && have_headroom()) { _depth++; pointee->oop_iterate(this); assert(_depth > 0, "invariant"); diff --git a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp index 98364690422..9ee15ff07e0 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp @@ -46,12 +46,15 @@ class 
DFSClosure : public BasicOopIterateClosure { size_t _max_depth; size_t _depth; bool _ignore_root_set; + const address _headroom_limit; DFSClosure(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* start_edge); void add_chain(); void closure_impl(UnifiedOopRef reference, const oop pointee); + bool have_headroom() const; + public: virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; } diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp index 9aef92c4182..2c341f2385e 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,10 @@ #include "jfr/leakprofiler/chains/edgeUtils.hpp" #include "jfr/leakprofiler/sampling/objectSample.hpp" #include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp" +#include "jfr/recorder/service/jfrOptionSet.hpp" #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" +#include "utilities/resizableHashTable.hpp" StoredEdge::StoredEdge(const Edge* parent, UnifiedOopRef reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {} @@ -216,84 +218,62 @@ bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t li return nullptr == *current; } -static GrowableArray* _leak_context_edges = nullptr; +typedef ResizeableHashTable SampleToLeakContextEdgeMap; +static SampleToLeakContextEdgeMap* _sample_to_leak_context_edge_map = nullptr; EdgeStore::EdgeStore() : _edges(new EdgeHashTable(this)) {} EdgeStore::~EdgeStore() { assert(_edges != nullptr, "invariant"); delete _edges; - delete 
_leak_context_edges; - _leak_context_edges = nullptr; + delete _sample_to_leak_context_edge_map; + _sample_to_leak_context_edge_map = nullptr; } -static int leak_context_edge_idx(const ObjectSample* sample) { +static const StoredEdge* leak_context_edge(const ObjectSample* sample) { assert(sample != nullptr, "invariant"); - return static_cast(sample->object()->mark().value()) >> markWord::lock_bits; + assert(_sample_to_leak_context_edge_map != nullptr, "invariant"); + const StoredEdge** edge = _sample_to_leak_context_edge_map->get(p2u(sample->object())); + return edge != nullptr ? *edge : nullptr; } bool EdgeStore::has_leak_context(const ObjectSample* sample) const { - const int idx = leak_context_edge_idx(sample); - if (idx == 0) { - return false; - } - assert(idx > 0, "invariant"); - assert(_leak_context_edges != nullptr, "invariant"); - assert(idx < _leak_context_edges->length(), "invariant"); - assert(_leak_context_edges->at(idx) != nullptr, "invariant"); - return true; + return _sample_to_leak_context_edge_map != nullptr && leak_context_edge(sample) != nullptr; } const StoredEdge* EdgeStore::get(const ObjectSample* sample) const { assert(sample != nullptr, "invariant"); - if (_leak_context_edges != nullptr) { + if (_sample_to_leak_context_edge_map != nullptr) { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - const int idx = leak_context_edge_idx(sample); - if (idx > 0) { - assert(idx < _leak_context_edges->length(), "invariant"); - const StoredEdge* const edge =_leak_context_edges->at(idx); - assert(edge != nullptr, "invariant"); + const StoredEdge* const edge = leak_context_edge(sample); + if (edge != nullptr) { return edge; } } return get(UnifiedOopRef::encode_in_native(sample->object_addr())); } -#ifdef ASSERT -// max_idx to ensure idx fit in lower 32-bits of markword together with lock bits. 
-static constexpr const int max_idx = right_n_bits(32 - markWord::lock_bits); +static constexpr const unsigned max_map_size = max_jint >> 1; -static void store_idx_precondition(oop sample_object, int idx) { - assert(sample_object != nullptr, "invariant"); - assert(sample_object->mark().is_marked(), "invariant"); - assert(idx > 0, "invariant"); - assert(idx <= max_idx, "invariant"); -} -#endif - -static void store_idx_in_markword(oop sample_object, int idx) { - DEBUG_ONLY(store_idx_precondition(sample_object, idx);) - const markWord idx_mark_word(sample_object->mark().value() | idx << markWord::lock_bits); - sample_object->set_mark(idx_mark_word); - assert(sample_object->mark().is_marked(), "must still be marked"); -} - -static const int initial_size = 64; - -static int save(const StoredEdge* edge) { - assert(edge != nullptr, "invariant"); - if (_leak_context_edges == nullptr) { - _leak_context_edges = new (mtTracing) GrowableArray(initial_size, mtTracing); - _leak_context_edges->append(nullptr); // next idx now at 1, for disambiguation in markword. +static inline unsigned map_size() { + assert(JfrOptionSet::old_object_queue_size() > 0, "invariant"); + unsigned size = JfrOptionSet::old_object_queue_size(); + size = round_up_power_of_2(size); + if (size < 1024) { + return 1024; } - return _leak_context_edges->append(edge); + size <<= 1; + return size >= max_map_size ? max_map_size : size; } -// We associate the leak context edge with the leak candidate object by saving the -// edge in an array and storing the array idx (shifted) into the markword of the candidate object. 
static void associate_with_candidate(const StoredEdge* leak_context_edge) { assert(leak_context_edge != nullptr, "invariant"); - store_idx_in_markword(leak_context_edge->pointee(), save(leak_context_edge)); + if (_sample_to_leak_context_edge_map == nullptr) { + const unsigned size = map_size(); + _sample_to_leak_context_edge_map = new (mtTracing) SampleToLeakContextEdgeMap(size, size); + } + assert(_sample_to_leak_context_edge_map != nullptr, "invariant"); + _sample_to_leak_context_edge_map->put(p2u(leak_context_edge->pointee()), leak_context_edge); } StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) { diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp index e920fd64ea9..460314854b7 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,6 @@ class StoredEdge : public Edge { size_t _skip_length; public: - StoredEdge(); StoredEdge(const Edge* parent, UnifiedOopRef reference); StoredEdge(const Edge& edge); StoredEdge(const StoredEdge& edge); diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml index 2b082165005..09d9e0ccabf 100644 --- a/src/hotspot/share/jfr/metadata/metadata.xml +++ b/src/hotspot/share/jfr/metadata/metadata.xml @@ -1273,16 +1273,22 @@ + + + + + + + + + - - - diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp index 426ba4e7650..969c9ca60c1 100644 --- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp +++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -281,7 +281,9 @@ TRACE_REQUEST_FUNC(SystemProcess) { #if INCLUDE_JVMTI template -static void send_agent_event(AgentEvent& event, const JvmtiAgent* agent) { +static void send_agent_event(AgentEvent& event, const JvmtiAgent* agent, Ticks& timestamp) { + event.set_starttime(timestamp); + event.set_endtime(timestamp); event.set_name(agent->name()); event.set_options(agent->options()); event.set_dynamic(agent->is_dynamic()); @@ -292,29 +294,31 @@ static void send_agent_event(AgentEvent& event, const JvmtiAgent* agent) { TRACE_REQUEST_FUNC(JavaAgent) { JvmtiAgentList::Iterator it = JvmtiAgentList::java_agents(); + Ticks ticks = timestamp(); while (it.has_next()) { const JvmtiAgent* agent = it.next(); assert(agent->is_jplis(), "invariant"); EventJavaAgent event; - send_agent_event(event, agent); + send_agent_event(event, agent, ticks); } } -static void send_native_agent_events(JvmtiAgentList::Iterator& it) { +static void send_native_agent_events(JvmtiAgentList::Iterator& it, Ticks& timestamp) { while (it.has_next()) { const JvmtiAgent* agent = it.next(); assert(!agent->is_jplis(), "invariant"); EventNativeAgent event; event.set_path(agent->os_lib_path()); - send_agent_event(event, agent); + send_agent_event(event, agent, timestamp); } } TRACE_REQUEST_FUNC(NativeAgent) { + Ticks ticks = timestamp(); JvmtiAgentList::Iterator native_agents_it = JvmtiAgentList::native_agents(); - send_native_agent_events(native_agents_it); + send_native_agent_events(native_agents_it, ticks); JvmtiAgentList::Iterator xrun_agents_it = JvmtiAgentList::xrun_agents(); - send_native_agent_events(xrun_agents_it); + send_native_agent_events(xrun_agents_it, ticks); } #else // INCLUDE_JVMTI TRACE_REQUEST_FUNC(JavaAgent) {} diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp index 9c57374d6c6..eab7de20545 
100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ static size_t element_size(bool compressed) { } static bool can_compress_element(traceid id) { - return Metaspace::using_class_space() && id < uncompressed_threshold; + return INCLUDE_CLASS_SPACE == 1 && id < uncompressed_threshold; } static size_t element_size(const Klass* klass) { diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp index 6214f6a2746..6048c19d911 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp @@ -206,13 +206,8 @@ void CompilerToVM::Data::initialize(JVMCI_TRAPS) { Universe_narrow_oop_base = nullptr; Universe_narrow_oop_shift = 0; } - if (UseCompressedClassPointers) { - Universe_narrow_klass_base = CompressedKlassPointers::base(); - Universe_narrow_klass_shift = CompressedKlassPointers::shift(); - } else { - Universe_narrow_klass_base = nullptr; - Universe_narrow_klass_shift = 0; - } + Universe_narrow_klass_base = CompressedKlassPointers::base(); + Universe_narrow_klass_shift = CompressedKlassPointers::shift(); Universe_non_oop_bits = Universe::non_oop_word(); Universe_verify_oop_mask = Universe::verify_oop_mask(); Universe_verify_oop_bits = Universe::verify_oop_bits(); @@ -390,7 +385,6 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) { X86_ONLY(do_int_flag(UseAVX)) \ do_bool_flag(UseCRC32Intrinsics) \ do_bool_flag(UseAdler32Intrinsics) \ - do_bool_flag(UseCompressedClassPointers) \ 
do_bool_flag(UseCompressedOops) \ X86_ONLY(do_bool_flag(UseCountLeadingZerosInstruction)) \ X86_ONLY(do_bool_flag(UseCountTrailingZerosInstruction)) \ diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 74314b0ad61..1fdf98588fd 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -342,7 +342,7 @@ volatile_nonstatic_field(ObjectMonitor, _succ, int64_t) \ \ volatile_nonstatic_field(oopDesc, _mark, markWord) \ - volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \ + volatile_nonstatic_field(oopDesc, _compressed_klass, narrowKlass) \ \ static_field(StubRoutines, _verify_oop_count, jint) \ \ @@ -450,8 +450,8 @@ nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \ - nonstatic_field(ThreadLocalAllocBuffer, _number_of_refills, unsigned) \ - nonstatic_field(ThreadLocalAllocBuffer, _slow_allocations, unsigned) \ + nonstatic_field(ThreadLocalAllocBuffer, _num_refills, unsigned) \ + nonstatic_field(ThreadLocalAllocBuffer, _num_slow_allocations, unsigned) \ \ nonstatic_field(SafepointMechanism::ThreadData, _polling_word, volatile uintptr_t) \ nonstatic_field(SafepointMechanism::ThreadData, _polling_page, volatile uintptr_t) \ diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp index 3ad6a197d07..2b8d6a72be4 100644 --- a/src/hotspot/share/logging/logTag.hpp +++ b/src/hotspot/share/logging/logTag.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its 
affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -90,11 +90,13 @@ class outputStream; LOG_TAG(freelist) \ LOG_TAG(gc) \ NOT_PRODUCT(LOG_TAG(generate)) \ + LOG_TAG(generateoopmap) \ LOG_TAG(handshake) \ LOG_TAG(hashtables) \ LOG_TAG(heap) \ LOG_TAG(heapdump) \ NOT_PRODUCT(LOG_TAG(heapsampling)) \ + COMPILER2_PRESENT(LOG_TAG(hotcode)) \ LOG_TAG(humongous) \ LOG_TAG(ihop) \ LOG_TAG(iklass) \ diff --git a/src/hotspot/share/memory/classLoaderMetaspace.cpp b/src/hotspot/share/memory/classLoaderMetaspace.cpp index c1ff172071d..af3b8b1b77f 100644 --- a/src/hotspot/share/memory/classLoaderMetaspace.cpp +++ b/src/hotspot/share/memory/classLoaderMetaspace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -162,10 +162,12 @@ void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size) { MetaBlock bl(ptr, word_size); // Add to class arena only if block is usable for encodable Klass storage. 
MetaspaceArena* receiving_arena = non_class_space_arena(); - if (Metaspace::using_class_space() && Metaspace::is_in_class_space(ptr) && +#if INCLUDE_CLASS_SPACE + if (Metaspace::is_in_class_space(ptr) && is_aligned(ptr, class_space_arena()->allocation_alignment_bytes())) { receiving_arena = class_space_arena(); } +#endif receiving_arena->deallocate(bl); DEBUG_ONLY(InternalStats::inc_num_deallocs();) } diff --git a/src/hotspot/share/memory/classLoaderMetaspace.hpp b/src/hotspot/share/memory/classLoaderMetaspace.hpp index aa43e171708..dc782611e98 100644 --- a/src/hotspot/share/memory/classLoaderMetaspace.hpp +++ b/src/hotspot/share/memory/classLoaderMetaspace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,9 +40,10 @@ namespace metaspace { // A ClassLoaderMetaspace manages MetaspaceArena(s) for a CLD. // -// A CLD owns one MetaspaceArena if UseCompressedClassPointers is false. Otherwise -// it owns two - one for the Klass* objects from the class space, one for the other -// types of MetaspaceObjs from the non-class space. +// 64-bit: +// +// A CLD owns two MetaspaceArenas - one for the Klass* objects from the class space, +// one for the other types of MetaspaceObjs from the non-class space. // // +------+ +----------------------+ +-------------------+ // | CLD | ---> | ClassLoaderMetaspace | ----> | (non class) Arena | @@ -58,6 +59,11 @@ namespace metaspace { // ^ // alloc top // +// 32-bit: +// +// A CLD owns just one MetaspaceArena. In that arena all metadata - Klass and other - +// are placed. 
+ class ClassLoaderMetaspace : public CHeapObj { friend class metaspace::ClmsTester; // for gtests @@ -67,11 +73,10 @@ class ClassLoaderMetaspace : public CHeapObj { const Metaspace::MetaspaceType _space_type; // Arena for allocations from non-class metaspace - // (resp. for all allocations if -XX:-UseCompressedClassPointers). metaspace::MetaspaceArena* _non_class_space_arena; // Arena for allocations from class space - // (null if -XX:-UseCompressedClassPointers). + // (null for 32-bit). metaspace::MetaspaceArena* _class_space_arena; Mutex* lock() const { return _lock; } diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index e686b324004..da6ebc991c8 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2021 SAP SE. All rights reserved. * Copyright (c) 2023, 2025, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -166,33 +166,33 @@ void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_me // it is a constant (to uninformed users, often confusingly large). For non-class space, it would // be interesting since free chunks can be uncommitted, but for now it is left out. 
- if (Metaspace::using_class_space()) { - log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" " - HEAP_CHANGE_FORMAT" " - HEAP_CHANGE_FORMAT, - HEAP_CHANGE_FORMAT_ARGS("Metaspace", - pre_meta_values.used(), - pre_meta_values.committed(), - meta_values.used(), - meta_values.committed()), - HEAP_CHANGE_FORMAT_ARGS("NonClass", - pre_meta_values.non_class_used(), - pre_meta_values.non_class_committed(), - meta_values.non_class_used(), - meta_values.non_class_committed()), - HEAP_CHANGE_FORMAT_ARGS("Class", - pre_meta_values.class_used(), - pre_meta_values.class_committed(), - meta_values.class_used(), - meta_values.class_committed())); - } else { - log_info(gc, metaspace)(HEAP_CHANGE_FORMAT, - HEAP_CHANGE_FORMAT_ARGS("Metaspace", - pre_meta_values.used(), - pre_meta_values.committed(), - meta_values.used(), - meta_values.committed())); - } +#if INCLUDE_CLASS_SPACE + log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" " + HEAP_CHANGE_FORMAT" " + HEAP_CHANGE_FORMAT, + HEAP_CHANGE_FORMAT_ARGS("Metaspace", + pre_meta_values.used(), + pre_meta_values.committed(), + meta_values.used(), + meta_values.committed()), + HEAP_CHANGE_FORMAT_ARGS("NonClass", + pre_meta_values.non_class_used(), + pre_meta_values.non_class_committed(), + meta_values.non_class_used(), + meta_values.non_class_committed()), + HEAP_CHANGE_FORMAT_ARGS("Class", + pre_meta_values.class_used(), + pre_meta_values.class_committed(), + meta_values.class_used(), + meta_values.class_committed())); +#else + log_info(gc, metaspace)(HEAP_CHANGE_FORMAT, + HEAP_CHANGE_FORMAT_ARGS("Metaspace", + pre_meta_values.used(), + pre_meta_values.committed(), + meta_values.used(), + meta_values.committed())); +#endif // INCLUDE_CLASS_SPACE } // This will print out a basic metaspace usage report but @@ -226,41 +226,36 @@ void MetaspaceUtils::print_on(outputStream* out) { stats.committed()/K, stats.reserved()/K); - if (Metaspace::using_class_space()) { - StreamIndentor si(out, 1); - out->print("class space "); - out->fill_to(17); - 
out->print_cr("used %zuK, " - "committed %zuK, " - "reserved %zuK", - stats.class_space_stats().used()/K, - stats.class_space_stats().committed()/K, - stats.class_space_stats().reserved()/K); - } +#if INCLUDE_CLASS_SPACE + StreamIndentor si(out, 1); + out->print("class space "); + out->fill_to(17); + out->print_cr("used %zuK, " + "committed %zuK, " + "reserved %zuK", + stats.class_space_stats().used()/K, + stats.class_space_stats().committed()/K, + stats.class_space_stats().reserved()/K); +#endif // INCLUDE_CLASS_SPACE } #ifdef ASSERT void MetaspaceUtils::verify() { if (Metaspace::initialized()) { - // Verify non-class chunkmanager... ChunkManager* cm = ChunkManager::chunkmanager_nonclass(); cm->verify(); - // ... and space list. VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass(); vsl->verify(); - if (Metaspace::using_class_space()) { - // If we use compressed class pointers, verify class chunkmanager... - cm = ChunkManager::chunkmanager_class(); - cm->verify(); - - // ... and class spacelist. - vsl = VirtualSpaceList::vslist_class(); - vsl->verify(); - } +#if INCLUDE_CLASS_SPACE + cm = ChunkManager::chunkmanager_class(); + cm->verify(); + vsl = VirtualSpaceList::vslist_class(); + vsl->verify(); +#endif // INCLUDE_CLASS_SPACE } } #endif @@ -387,7 +382,8 @@ void MetaspaceGC::post_initialize() { bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { // Check if the compressed class space is full. 
- if (is_class && Metaspace::using_class_space()) { +#if INCLUDE_CLASS_SPACE + if (is_class) { size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType); if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by %zu words (CompressedClassSpaceSize = %zu words)", @@ -395,6 +391,7 @@ bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { return false; } } +#endif // INCLUDE_CLASS_SPACE // Check if the user has imposed a limit on the metaspace memory. size_t committed_bytes = MetaspaceUtils::committed_bytes(); @@ -548,7 +545,7 @@ const void* Metaspace::_class_space_end = nullptr; bool Metaspace::initialized() { return metaspace::MetaspaceContext::context_nonclass() != nullptr - LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true)); + CLASS_SPACE_ONLY(&& Metaspace::class_space_is_initialized()); } #ifdef _LP64 @@ -566,9 +563,9 @@ void Metaspace::print_compressed_class_space(outputStream* st) { // Given a prereserved space, use that to set up the compressed class space list. void Metaspace::initialize_class_space(ReservedSpace rs) { + STATIC_ASSERT(INCLUDE_CLASS_SPACE == 1); assert(rs.size() >= CompressedClassSpaceSize, "%zu != %zu", rs.size(), CompressedClassSpaceSize); - assert(using_class_space(), "Must be using class space"); assert(rs.size() == CompressedClassSpaceSize, "%zu != %zu", rs.size(), CompressedClassSpaceSize); @@ -658,49 +655,51 @@ void Metaspace::ergo_initialize() { MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment()); - if (UseCompressedClassPointers) { - // Let Class Space not be larger than 80% of MaxMetaspaceSize. Note that is - // grossly over-dimensioned for most usage scenarios; typical ratio of - // class space : non class space usage is about 1:6. With many small classes, - // it can get as low as 1:2. 
It is not a big deal though since ccs is only - // reserved and will be committed on demand only. - const size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10); +#if INCLUDE_CLASS_SPACE - // Sanity check. - const size_t max_klass_range = CompressedKlassPointers::max_klass_range_size(); - assert(max_klass_range >= reserve_alignment(), - "Klass range (%zu) must cover at least a full root chunk (%zu)", - max_klass_range, reserve_alignment()); + // Let Class Space not be larger than 80% of MaxMetaspaceSize. Note that is + // grossly over-dimensioned for most usage scenarios; typical ratio of + // class space : non class space usage is about 1:6. With many small classes, + // it can get as low as 1:2. It is not a big deal though since ccs is only + // reserved and will be committed on demand only. + const size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10); - size_t adjusted_ccs_size = MIN3(CompressedClassSpaceSize, max_ccs_size, max_klass_range); + // Sanity check. + const size_t max_klass_range = CompressedKlassPointers::max_klass_range_size(); + assert(max_klass_range >= reserve_alignment(), + "Klass range (%zu) must cover at least a full root chunk (%zu)", + max_klass_range, reserve_alignment()); - // CCS must be aligned to root chunk size, and be at least the size of one - // root chunk. 
- adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment()); - adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment()); + size_t adjusted_ccs_size = MIN3(CompressedClassSpaceSize, max_ccs_size, max_klass_range); - // Print a warning if the adjusted size differs from the users input - if (CompressedClassSpaceSize != adjusted_ccs_size) { - #define X "CompressedClassSpaceSize adjusted from user input " \ - "%zu bytes to %zu bytes", CompressedClassSpaceSize, adjusted_ccs_size - if (FLAG_IS_CMDLINE(CompressedClassSpaceSize)) { - log_warning(metaspace)(X); - } else { - log_info(metaspace)(X); - } - #undef X - } - - // Note: re-adjusting may have us left with a CompressedClassSpaceSize - // larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize. - // Lets just live with that, its not a big deal. - if (adjusted_ccs_size != CompressedClassSpaceSize) { - FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size); - log_info(metaspace)("Setting CompressedClassSpaceSize to %zu.", - CompressedClassSpaceSize); + // CCS must be aligned to root chunk size, and be at least the size of one + // root chunk. + adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment()); + adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment()); + + // Print a warning if the adjusted size differs from the users input + if (CompressedClassSpaceSize != adjusted_ccs_size) { + #define X "CompressedClassSpaceSize adjusted from user input " \ + "%zu bytes to %zu bytes", CompressedClassSpaceSize, adjusted_ccs_size + if (FLAG_IS_CMDLINE(CompressedClassSpaceSize)) { + log_warning(metaspace)(X); + } else { + log_info(metaspace)(X); } + #undef X } + // Note: re-adjusting may have us left with a CompressedClassSpaceSize + // larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize. + // Lets just live with that, its not a big deal. 
+ if (adjusted_ccs_size != CompressedClassSpaceSize) { + FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size); + log_info(metaspace)("Setting CompressedClassSpaceSize to %zu.", + CompressedClassSpaceSize); + } + +#endif // INCLUDE_CLASS_SPACE + // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion if (MetaspaceSize > MaxMetaspaceSize) { MetaspaceSize = MaxMetaspaceSize; @@ -724,15 +723,12 @@ void Metaspace::global_initialize() { AOTMetaspace::initialize_for_static_dump(); } - // If UseCompressedClassPointers=1, we have two cases: + // We have two cases: // a) if CDS is active (runtime, Xshare=on), it will create the class space - // for us, initialize it and set up CompressedKlassPointers encoding. - // Class space will be reserved above the mapped archives. + // for us. It then will set up encoding to cover both CDS archive space and class space. // b) if CDS either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump), - // we will create the class space on our own. It will be placed above the java heap, - // since we assume it has been placed in low - // address regions. We may rethink this (see JDK-8244943). Failing that, - // it will be placed anywhere. + // we will create the class space on our own and set up encoding to only cover the + // class space. #if INCLUDE_CDS // case (a) @@ -746,9 +742,9 @@ void Metaspace::global_initialize() { } #endif // INCLUDE_CDS -#ifdef _LP64 +#if INCLUDE_CLASS_SPACE - if (using_class_space() && !class_space_is_initialized()) { + if (!class_space_is_initialized()) { assert(!CDSConfig::is_using_archive(), "CDS archive is not mapped at this point"); // case (b) (No CDS) @@ -835,28 +831,23 @@ void Metaspace::global_initialize() { } #else - // +UseCompressedClassPointers on 32-bit: does not need class space. Klass can live wherever. 
- if (UseCompressedClassPointers) { - const address start = (address)os::vm_min_address(); // but not in the zero page - const address end = (address)CompressedKlassPointers::max_klass_range_size(); - CompressedKlassPointers::initialize(start, end - start); - } -#endif // __LP64 + // 32-bit: + const address start = (address)os::vm_min_address(); // but not in the zero page + const address end = (address)CompressedKlassPointers::max_klass_range_size(); + CompressedKlassPointers::initialize(start, end - start); +#endif // INCLUDE_CLASS_SPACE // Initialize non-class virtual space list, and its chunk manager: MetaspaceContext::initialize_nonclass_space_context(); _tracer = new MetaspaceTracer(); - if (UseCompressedClassPointers) { - // Note: "cds" would be a better fit but keep this for backward compatibility. - LogTarget(Info, gc, metaspace) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - CDS_ONLY(AOTMetaspace::print_on(&ls);) - Metaspace::print_compressed_class_space(&ls); - CompressedKlassPointers::print_mode(&ls); - } + LogTarget(Info, gc, metaspace) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + CDS_ONLY(AOTMetaspace::print_on(&ls);) + Metaspace::print_compressed_class_space(&ls); + CompressedKlassPointers::print_mode(&ls); } } @@ -888,15 +879,13 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); if (result != nullptr) { -#ifdef ASSERT - if (using_class_space() && mdtype == ClassType) { + if (INCLUDE_CLASS_SPACE == 1 && mdtype == ClassType) { assert(is_in_class_space(result) && is_aligned(result, CompressedKlassPointers::klass_alignment_in_bytes()), "Sanity"); } else { assert((is_in_class_space(result) || is_in_nonclass_metaspace(result)) && is_aligned(result, Metaspace::min_allocation_alignment_bytes), "Sanity"); } -#endif // Zero initialize. 
Copy::fill_to_words((HeapWord*)result, word_size, 0); log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result)); @@ -1017,12 +1006,12 @@ void Metaspace::purge(bool classes_unloaded) { if (cm != nullptr) { cm->purge(); } - if (using_class_space()) { - cm = ChunkManager::chunkmanager_class(); - if (cm != nullptr) { - cm->purge(); - } +#if INCLUDE_CLASS_SPACE + cm = ChunkManager::chunkmanager_class(); + if (cm != nullptr) { + cm->purge(); } +#endif // INCLUDE_CLASS_SPACE } // Try to satisfy queued metaspace allocation requests. diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp index 01ef4b4dd49..96f5a2459ce 100644 --- a/src/hotspot/share/memory/metaspace.hpp +++ b/src/hotspot/share/memory/metaspace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -164,18 +164,12 @@ public: static void print_compressed_class_space(outputStream* st) NOT_LP64({}); - // Return TRUE only if UseCompressedClassPointers is True. - static bool using_class_space() { - return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers); - } - static bool is_class_space_allocation(MetadataType mdType) { - return mdType == ClassType && using_class_space(); + return CLASS_SPACE_ONLY(mdType == ClassType) NOT_CLASS_SPACE(false); } static bool initialized(); }; - #endif // SHARE_MEMORY_METASPACE_HPP diff --git a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp index 3cff2a50d03..f6683f50fd1 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -59,39 +59,39 @@ static void print_vs(outputStream* out, size_t scale) { const size_t committed_nc = RunningCounters::committed_words_nonclass(); const int num_nodes_nc = VirtualSpaceList::vslist_nonclass()->num_nodes(); - if (Metaspace::using_class_space()) { - const size_t reserved_c = RunningCounters::reserved_words_class(); - const size_t committed_c = RunningCounters::committed_words_class(); - const int num_nodes_c = VirtualSpaceList::vslist_class()->num_nodes(); +#if INCLUDE_CLASS_SPACE + const size_t reserved_c = RunningCounters::reserved_words_class(); + const size_t committed_c = RunningCounters::committed_words_class(); + const int num_nodes_c = VirtualSpaceList::vslist_class()->num_nodes(); - out->print(" Non-class space: "); - print_scaled_words(out, reserved_nc, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_nc, reserved_nc, scale, 7); - out->print(" committed, "); - out->print(" %d nodes.", num_nodes_nc); - out->cr(); - out->print(" Class space: "); - print_scaled_words(out, reserved_c, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_c, reserved_c, scale, 7); - out->print(" committed, "); - out->print(" %d nodes.", num_nodes_c); - out->cr(); - out->print(" Both: "); - print_scaled_words(out, reserved_c + reserved_nc, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_c + committed_nc, reserved_c + reserved_nc, scale, 7); - out->print(" committed. 
"); - out->cr(); - } else { - print_scaled_words(out, reserved_nc, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_nc, reserved_nc, scale, 7); - out->print(" committed, "); - out->print(" %d nodes.", num_nodes_nc); - out->cr(); - } + out->print(" Non-class space: "); + print_scaled_words(out, reserved_nc, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_nc, reserved_nc, scale, 7); + out->print(" committed, "); + out->print(" %d nodes.", num_nodes_nc); + out->cr(); + out->print(" Class space: "); + print_scaled_words(out, reserved_c, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_c, reserved_c, scale, 7); + out->print(" committed, "); + out->print(" %d nodes.", num_nodes_c); + out->cr(); + out->print(" Both: "); + print_scaled_words(out, reserved_c + reserved_nc, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_c + committed_nc, reserved_c + reserved_nc, scale, 7); + out->print(" committed. 
"); + out->cr(); +#else + print_scaled_words(out, reserved_nc, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_nc, reserved_nc, scale, 7); + out->print(" committed, "); + out->print(" %d nodes.", num_nodes_nc); + out->cr(); +#endif // INCLUDE_CLASS_SPACE } static void print_settings(outputStream* out, size_t scale) { @@ -102,12 +102,12 @@ static void print_settings(outputStream* out, size_t scale) { print_human_readable_size(out, MaxMetaspaceSize, scale); } out->cr(); - if (Metaspace::using_class_space()) { - out->print("CompressedClassSpaceSize: "); - print_human_readable_size(out, CompressedClassSpaceSize, scale); - } else { - out->print("No class space"); - } +#if INCLUDE_CLASS_SPACE + out->print("CompressedClassSpaceSize: "); + print_human_readable_size(out, CompressedClassSpaceSize, scale); +#else + out->print("No class space"); +#endif // INCLUDE_CLASS_SPACE out->cr(); out->print("Initial GC threshold: "); print_human_readable_size(out, MetaspaceSize, scale); @@ -117,9 +117,7 @@ static void print_settings(outputStream* out, size_t scale) { out->cr(); out->print_cr("CDS: %s", (CDSConfig::is_using_archive() ? "on" : (CDSConfig::is_dumping_static_archive() ? 
"dump" : "off"))); Settings::print_on(out); -#ifdef _LP64 CompressedKlassPointers::print_mode(out); -#endif } // This will print out a basic metaspace usage report but @@ -131,9 +129,7 @@ void MetaspaceReporter::print_basic_report(outputStream* out, size_t scale) { } out->cr(); out->print_cr("Usage:"); - if (Metaspace::using_class_space()) { - out->print(" Non-class: "); - } + CLASS_SPACE_ONLY(out->print(" Non-class: ");) // Note: since we want to purely rely on counters, without any locking or walking the CLDG, // for Usage stats (statistics over in-use chunks) all we can print is the @@ -144,37 +140,35 @@ void MetaspaceReporter::print_basic_report(outputStream* out, size_t scale) { print_scaled_words(out, used_nc, scale, 5); out->print(" used."); out->cr(); - if (Metaspace::using_class_space()) { - const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType); - out->print(" Class: "); - print_scaled_words(out, used_c, scale, 5); - out->print(" used."); - out->cr(); - out->print(" Both: "); - const size_t used = used_nc + used_c; - print_scaled_words(out, used, scale, 5); - out->print(" used."); - out->cr(); - } +#if INCLUDE_CLASS_SPACE + const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType); + out->print(" Class: "); + print_scaled_words(out, used_c, scale, 5); + out->print(" used."); + out->cr(); + out->print(" Both: "); + const size_t used = used_nc + used_c; + print_scaled_words(out, used, scale, 5); + out->print(" used."); + out->cr(); +#endif // INCLUDE_CLASS_SPACE out->cr(); out->print_cr("Virtual space:"); print_vs(out, scale); out->cr(); out->print_cr("Chunk freelists:"); - if (Metaspace::using_class_space()) { - out->print(" Non-Class: "); - } + CLASS_SPACE_ONLY(out->print(" Non-Class: ");) print_scaled_words(out, ChunkManager::chunkmanager_nonclass()->total_word_size(), scale); out->cr(); - if (Metaspace::using_class_space()) { - out->print(" Class: "); - print_scaled_words(out, 
ChunkManager::chunkmanager_class()->total_word_size(), scale); - out->cr(); - out->print(" Both: "); - print_scaled_words(out, ChunkManager::chunkmanager_nonclass()->total_word_size() + - ChunkManager::chunkmanager_class()->total_word_size(), scale); - out->cr(); - } +#if INCLUDE_CLASS_SPACE + out->print(" Class: "); + print_scaled_words(out, ChunkManager::chunkmanager_class()->total_word_size(), scale); + out->cr(); + out->print(" Both: "); + print_scaled_words(out, ChunkManager::chunkmanager_nonclass()->total_word_size() + + ChunkManager::chunkmanager_class()->total_word_size(), scale); + out->cr(); +#endif // INCLUDE_CLASS_SPACE out->cr(); // Print basic settings @@ -256,70 +250,70 @@ void MetaspaceReporter::print_report(outputStream* out, size_t scale, int flags) // -- Print VirtualSpaceList details. if ((flags & (int)Option::ShowVSList) > 0) { out->cr(); - out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : ""); - - if (Metaspace::using_class_space()) { - out->print_cr(" Non-Class:"); - } +#if INCLUDE_CLASS_SPACE + out->print_cr("Virtual space lists:"); + out->print_cr(" Non-Class:"); VirtualSpaceList::vslist_nonclass()->print_on(out); out->cr(); - if (Metaspace::using_class_space()) { - out->print_cr(" Class:"); - VirtualSpaceList::vslist_class()->print_on(out); - out->cr(); - } + out->print_cr(" Class:"); + VirtualSpaceList::vslist_class()->print_on(out); + out->cr(); +#else + out->print_cr("Virtual space list:"); + VirtualSpaceList::vslist_nonclass()->print_on(out); + out->cr(); +#endif // INCLUDE_CLASS_SPACE } out->cr(); //////////// Freelists (ChunkManager) section /////////////////////////// - out->cr(); - out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? 
"s" : ""); - ChunkManagerStats non_class_cm_stat; ChunkManagerStats class_cm_stat; ChunkManagerStats total_cm_stat; ChunkManager::chunkmanager_nonclass()->add_to_statistics(&non_class_cm_stat); - if (Metaspace::using_class_space()) { - ChunkManager::chunkmanager_nonclass()->add_to_statistics(&non_class_cm_stat); - ChunkManager::chunkmanager_class()->add_to_statistics(&class_cm_stat); - total_cm_stat.add(non_class_cm_stat); - total_cm_stat.add(class_cm_stat); +#if INCLUDE_CLASS_SPACE + ChunkManager::chunkmanager_class()->add_to_statistics(&class_cm_stat); + total_cm_stat.add(non_class_cm_stat); + total_cm_stat.add(class_cm_stat); - out->print_cr(" Non-Class:"); - non_class_cm_stat.print_on(out, scale); - out->cr(); - out->print_cr(" Class:"); - class_cm_stat.print_on(out, scale); - out->cr(); - out->print_cr(" Both:"); - total_cm_stat.print_on(out, scale); - out->cr(); - } else { - ChunkManager::chunkmanager_nonclass()->add_to_statistics(&non_class_cm_stat); - non_class_cm_stat.print_on(out, scale); - out->cr(); - } + out->print_cr("Chunk freelists:"); + out->cr(); + out->print_cr(" Non-Class:"); + non_class_cm_stat.print_on(out, scale); + out->cr(); + out->print_cr(" Class:"); + class_cm_stat.print_on(out, scale); + out->cr(); + out->print_cr(" Both:"); + total_cm_stat.print_on(out, scale); + out->cr(); +#else + out->print_cr("Chunk freelist:"); + ChunkManager::chunkmanager_nonclass()->add_to_statistics(&non_class_cm_stat); + non_class_cm_stat.print_on(out, scale); + out->cr(); +#endif // INCLUDE_CLASS_SPACE // -- Print Chunkmanager details. 
if ((flags & (int)Option::ShowChunkFreeList) > 0) { out->cr(); out->print_cr("Chunk freelist details:"); - if (Metaspace::using_class_space()) { - out->print_cr(" Non-Class:"); - } +#if INCLUDE_CLASS_SPACE + out->print_cr(" Non-Class:"); ChunkManager::chunkmanager_nonclass()->print_on(out); out->cr(); - if (Metaspace::using_class_space()) { - out->print_cr(" Class:"); - ChunkManager::chunkmanager_class()->print_on(out); - out->cr(); - } + out->print_cr(" Class:"); + ChunkManager::chunkmanager_class()->print_on(out); + out->cr(); +#else + ChunkManager::chunkmanager_nonclass()->print_on(out); + out->cr(); +#endif // INCLUDE_CLASS_SPACE } out->cr(); - //////////// Waste section /////////////////////////// // As a convenience, print a summary of common waste. out->cr(); diff --git a/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp b/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp index aab46d64db5..d90e8ed090d 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -205,28 +205,26 @@ ArenaStats ClmsStats::totals() const { void ClmsStats::print_on(outputStream* st, size_t scale, bool detailed) const { StreamIndentor si(st, 2); st->cr(); - if (Metaspace::using_class_space()) { - st->print("Non-Class: "); - } + CLASS_SPACE_ONLY(st->print("Non-Class: ");) _arena_stats_nonclass.print_on(st, scale, detailed); if (detailed) { st->cr(); } - if (Metaspace::using_class_space()) { +#if INCLUDE_CLASS_SPACE + st->cr(); + st->print(" Class: "); + _arena_stats_class.print_on(st, scale, detailed); + if (detailed) { st->cr(); - st->print(" Class: "); - _arena_stats_class.print_on(st, scale, detailed); - if (detailed) { - st->cr(); - } - st->cr(); - st->print(" Both: "); - totals().print_on(st, scale, detailed); - if (detailed) { - st->cr(); - } } st->cr(); + st->print(" Both: "); + totals().print_on(st, scale, detailed); + if (detailed) { + st->cr(); + } +#endif // INCLUDE_CLASS_SPACE + st->cr(); } #ifdef ASSERT diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp index df4e507b104..a934a628582 100644 --- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp +++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -259,12 +259,11 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size, } #ifndef _LP64 - // On 32-bit, with +UseCompressedClassPointers, the whole address space is the encoding range. We therefore - // don't need a class space. However, as a pragmatic workaround for pesty overflow problems on 32-bit, we leave - // a small area at the end of the address space out of the encoding range. 
We just assume no Klass will ever live + // On 32-bit, the whole address space is the encoding range. We therefore don't need a class space. + // However, as a pragmatic workaround for pesty overflow problems on 32-bit, we leave a small area + // at the end of the address space out of the encoding range. We just assume no Klass will ever live // there (it won't, for no OS we support on 32-bit has user-addressable memory up there). - assert(!UseCompressedClassPointers || - rs.end() <= (char*)CompressedKlassPointers::max_klass_range_size(), "Weirdly high address"); + assert(rs.end() <= (char*)CompressedKlassPointers::max_klass_range_size(), "Weirdly high address"); #endif // _LP64 MemTracker::record_virtual_memory_tag(rs, mtMetaspace); diff --git a/src/hotspot/share/memory/metaspaceClosure.cpp b/src/hotspot/share/memory/metaspaceClosure.cpp index 0239eadf692..0926b55b9a3 100644 --- a/src/hotspot/share/memory/metaspaceClosure.cpp +++ b/src/hotspot/share/memory/metaspaceClosure.cpp @@ -22,11 +22,11 @@ * */ -#include "cds/aotGrowableArray.hpp" #include "classfile/packageEntry.hpp" #include "memory/metaspaceClosure.hpp" #include "oops/array.hpp" #include "oops/instanceKlass.hpp" +#include "utilities/growableArray.hpp" // Sanity checks static_assert(!HAS_METASPACE_POINTERS_DO(int)); @@ -35,8 +35,6 @@ static_assert(HAS_METASPACE_POINTERS_DO(Array)); static_assert(HAS_METASPACE_POINTERS_DO(Array)); static_assert(HAS_METASPACE_POINTERS_DO(InstanceKlass)); static_assert(HAS_METASPACE_POINTERS_DO(PackageEntry)); -static_assert(HAS_METASPACE_POINTERS_DO(AOTGrowableArray)); -static_assert(HAS_METASPACE_POINTERS_DO(AOTGrowableArray)); void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref) { if (_enclosing_ref != nullptr) { diff --git a/src/hotspot/share/memory/metaspaceClosure.hpp b/src/hotspot/share/memory/metaspaceClosure.hpp index b6ba69d6f63..ac42dd13c6c 100644 --- a/src/hotspot/share/memory/metaspaceClosure.hpp +++ b/src/hotspot/share/memory/metaspaceClosure.hpp @@ 
-25,7 +25,6 @@ #ifndef SHARE_MEMORY_METASPACECLOSURE_HPP #define SHARE_MEMORY_METASPACECLOSURE_HPP -#include "cds/aotGrowableArray.hpp" #include "cppstdlib/type_traits.hpp" #include "logging/log.hpp" #include "memory/allocation.hpp" @@ -90,7 +89,9 @@ public: // int size_in_heapwords() const; // // Currently, the iterable types include all subtypes of MetsapceObj, as well - // as GrowableArray, ModuleEntry and PackageEntry. + // as GrowableArray (C-heap allocated only), ModuleEntry, and PackageEntry. + // + // (Note that GrowableArray is supported specially and does not require the above functions.) // // Calling these functions would be trivial if these were virtual functions. // However, to save space, MetaspaceObj has NO vtable. The vtable is introduced @@ -303,11 +304,38 @@ private: }; //-------------------------------- - // Support for AOTGrowableArray + // Support for GrowableArray //-------------------------------- + // GrowableArrayRef -- iterate an instance of GrowableArray. + template class GrowableArrayRef : public Ref { + GrowableArray** _mpp; + GrowableArray* dereference() const { + return *_mpp; + } + + public: + GrowableArrayRef(GrowableArray** mpp, Writability w) : Ref(w), _mpp(mpp) {} + + virtual void** mpp() const { + return (void**)_mpp; + } + + virtual void metaspace_pointers_do(MetaspaceClosure *it) const { + GrowableArray* array = dereference(); + log_trace(aot)("Iter(GrowableArray): %p [%d]", array, array->length()); + array->assert_on_C_heap(); + it->push_c_array(array->data_addr(), array->capacity()); + } + + virtual bool is_read_only_by_default() const { return false; } + virtual bool not_null() const { return dereference() != nullptr; } + virtual int size() const { return (int)heap_word_size(sizeof(*dereference())); } + virtual MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; } + }; + // Abstract base class for MSOCArrayRef, MSOPointerCArrayRef and OtherCArrayRef. 
- // These are used for iterating the buffer held by AOTGrowableArray. + // These are used for iterating the buffer held by GrowableArray. template class CArrayRef : public Ref { T** _mpp; int _num_elems; // Number of elements @@ -354,7 +382,7 @@ private: // MSOCArrayRef -- iterate a C array of type T, where T has metaspace_pointer_do(). // We recursively call T::metaspace_pointers_do() for each element in this array. - // This is for supporting AOTGrowableArray. + // This is for supporting GrowableArray. // // E.g., PackageEntry* _pkg_entry_pointers[2]; // a buffer that has 2 PackageEntry objects // ... @@ -377,7 +405,7 @@ private: // MSOPointerCArrayRef -- iterate a C array of type T*, where T has metaspace_pointer_do(). // We recursively call MetaspaceClosure::push() for each pointer in this array. - // This is for supporting AOTGrowableArray. + // This is for supporting GrowableArray. // // E.g., PackageEntry** _pkg_entry_pointers[2]; // a buffer that has 2 PackageEntry pointers // ... 
@@ -440,11 +468,11 @@ public: // Array*>* a4 = ...; it->push(&a4); => MSOPointerArrayRef // Array* a5 = ...; it->push(&a5); => MSOPointerArrayRef // - // AOTGrowableArrays have a separate "C array" buffer, so they are scanned in two steps: + // GrowableArrays have a separate "C array" buffer, so they are scanned in two steps: // - // AOTGrowableArray* ga1 = ...; it->push(&ga1); => MSORef => OtherCArrayRef - // AOTGrowableArray* ga2 = ...; it->push(&ga2); => MSORef => MSOCArrayRef - // AOTGrowableArray* ga3 = ...; it->push(&ga3); => MSORef => MSOPointerCArrayRef + // GrowableArray* ga1 = ...; it->push(&ga1); => GrowableArrayRef => OtherCArrayRef + // GrowableArray* ga2 = ...; it->push(&ga2); => GrowableArrayRef => MSOCArrayRef + // GrowableArray* ga3 = ...; it->push(&ga3); => GrowableArrayRef => MSOPointerCArrayRef // // Note that the following will fail to compile: // @@ -476,7 +504,12 @@ public: push_with_ref>(mpp, w); } - // --- The buffer of AOTGrowableArray + template + void push(GrowableArray** mpp, Writability w = _default) { + push_with_ref>(mpp, w); + } + + // --- The buffer of GrowableArray template void push_c_array(T** mpp, int num_elems, Writability w = _default) { push_impl(new OtherCArrayRef(mpp, num_elems, w)); diff --git a/src/hotspot/share/nmt/memReporter.cpp b/src/hotspot/share/nmt/memReporter.cpp index 27a94ec7bc0..429db6ad31f 100644 --- a/src/hotspot/share/nmt/memReporter.cpp +++ b/src/hotspot/share/nmt/memReporter.cpp @@ -272,9 +272,7 @@ void MemSummaryReporter::report_summary_of_tag(MemTag mem_tag, } else if (mem_tag == mtClass) { // Metadata information report_metadata(Metaspace::NonClassType); - if (Metaspace::using_class_space()) { - report_metadata(Metaspace::ClassType); - } + CLASS_SPACE_ONLY(report_metadata(Metaspace::ClassType);) } out->cr(); } @@ -754,9 +752,9 @@ void MemSummaryDiffReporter::diff_summary_of_tag(MemTag mem_tag, void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceCombinedStats& current_ms, const 
MetaspaceCombinedStats& early_ms) const { print_metaspace_diff("Metadata", current_ms.non_class_space_stats(), early_ms.non_class_space_stats()); - if (Metaspace::using_class_space()) { - print_metaspace_diff("Class space", current_ms.class_space_stats(), early_ms.class_space_stats()); - } +#if INCLUDE_CLASS_SPACE + print_metaspace_diff("Class space", current_ms.class_space_stats(), early_ms.class_space_stats()); +#endif } void MemSummaryDiffReporter::print_metaspace_diff(const char* header, diff --git a/src/hotspot/share/nmt/memTracker.hpp b/src/hotspot/share/nmt/memTracker.hpp index d9ebf4dc30e..6b5b6affa14 100644 --- a/src/hotspot/share/nmt/memTracker.hpp +++ b/src/hotspot/share/nmt/memTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -140,6 +140,7 @@ class MemTracker : AllStatic { assert_post_init(); if (!enabled()) return; if (addr != nullptr) { + NmtVirtualMemoryLocker nvml; VirtualMemoryTracker::Instance::remove_released_region((address)addr, size); } } diff --git a/src/hotspot/share/nmt/threadStackTracker.cpp b/src/hotspot/share/nmt/threadStackTracker.cpp index 3e649d882c4..6fb17c93782 100644 --- a/src/hotspot/share/nmt/threadStackTracker.cpp +++ b/src/hotspot/share/nmt/threadStackTracker.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2019, 2024, Red Hat, Inc. All rights reserved. - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,6 @@ void ThreadStackTracker::delete_thread_stack(void* base, size_t size) { assert(base != nullptr, "Should have been filtered"); align_thread_stack_boundaries_inward(base, size); - MemTracker::NmtVirtualMemoryLocker nvml; MemTracker::record_virtual_memory_release((address)base, size); _thread_count--; } diff --git a/src/hotspot/share/oops/arrayKlass.cpp b/src/hotspot/share/oops/arrayKlass.cpp index 30a2bc5102a..8a73f58b46a 100644 --- a/src/hotspot/share/oops/arrayKlass.cpp +++ b/src/hotspot/share/oops/arrayKlass.cpp @@ -41,7 +41,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" -ArrayKlass::ArrayKlass() { +ArrayKlass::ArrayKlass() : _dimension() { assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS"); } @@ -88,9 +88,9 @@ Method* ArrayKlass::uncached_lookup_method(const Symbol* name, return super()->uncached_lookup_method(name, signature, OverpassLookupMode::skip, private_mode); } -ArrayKlass::ArrayKlass(Symbol* name, KlassKind kind) : +ArrayKlass::ArrayKlass(int n, Symbol* name, KlassKind kind) : Klass(kind), - _dimension(1), + _dimension(n), _higher_dimension(nullptr), _lower_dimension(nullptr) { // Arrays don't add any new methods, so their vtable is the same size as diff --git a/src/hotspot/share/oops/arrayKlass.hpp b/src/hotspot/share/oops/arrayKlass.hpp index b9b100f18a8..738387c57b4 100644 --- a/src/hotspot/share/oops/arrayKlass.hpp +++ b/src/hotspot/share/oops/arrayKlass.hpp @@ -38,7 +38,7 @@ class ArrayKlass: public Klass { private: // If you add a new field that points to any metaspace object, you // must add this field to ArrayKlass::metaspace_pointers_do(). - int _dimension; // This is n'th-dimensional array. + const int _dimension; // This is n'th-dimensional array. ObjArrayKlass* volatile _higher_dimension; // Refers the (n+1)'th-dimensional array (if present). 
ArrayKlass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present). @@ -46,7 +46,7 @@ class ArrayKlass: public Klass { // Constructors // The constructor with the Symbol argument does the real array // initialization, the other is a dummy - ArrayKlass(Symbol* name, KlassKind kind); + ArrayKlass(int n, Symbol* name, KlassKind kind); ArrayKlass(); public: @@ -63,7 +63,6 @@ class ArrayKlass: public Klass { // Instance variables int dimension() const { return _dimension; } - void set_dimension(int dimension) { _dimension = dimension; } ObjArrayKlass* higher_dimension() const { return _higher_dimension; } inline ObjArrayKlass* higher_dimension_acquire() const; // load with acquire semantics diff --git a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp index f0c476a2486..836a1b9250d 100644 --- a/src/hotspot/share/oops/arrayOop.hpp +++ b/src/hotspot/share/oops/arrayOop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,8 +80,7 @@ private: // The _length field is not declared in C++. It is allocated after the // mark-word when using compact headers (+UseCompactObjectHeaders), otherwise - // after the compressed Klass* when running with compressed class-pointers - // (+UseCompressedClassPointers), or else after the full Klass*. + // after the compressed Klass*. static int length_offset_in_bytes() { return oopDesc::base_offset_in_bytes(); } diff --git a/src/hotspot/share/oops/bsmAttribute.hpp b/src/hotspot/share/oops/bsmAttribute.hpp index a28d2757fb0..32bc58b5b07 100644 --- a/src/hotspot/share/oops/bsmAttribute.hpp +++ b/src/hotspot/share/oops/bsmAttribute.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,18 +61,18 @@ public: _argv_offset = 2 }; - int bootstrap_method_index() const { + u2 bootstrap_method_index() const { return _bootstrap_method_index; } - int argument_count() const { + u2 argument_count() const { return _argument_count; } - int argument(int n) const { - assert(checked_cast(n) < _argument_count, "oob"); + u2 argument(u2 n) const { + assert(n < _argument_count, "oob"); return argument_indexes()[n]; } - void set_argument(int index, u2 value) { + void set_argument(u2 index, u2 value) { assert(index >= 0 && index < argument_count(), "invariant"); argument_indexes()[index] = value; } diff --git a/src/hotspot/share/oops/bsmAttribute.inline.hpp b/src/hotspot/share/oops/bsmAttribute.inline.hpp index e678c280c26..8e048704e08 100644 --- a/src/hotspot/share/oops/bsmAttribute.inline.hpp +++ b/src/hotspot/share/oops/bsmAttribute.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ inline BSMAttributeEntry* BSMAttributeEntries::InsertionIterator::reserve_new_en inline void BSMAttributeEntry::copy_args_into(BSMAttributeEntry* entry) const { assert(entry->argument_count() == this->argument_count(), "must be same"); - for (int i = 0; i < argument_count(); i++) { + for (u2 i = 0; i < argument_count(); i++) { entry->set_argument(i, this->argument(i)); } } diff --git a/src/hotspot/share/oops/compressedKlass.cpp b/src/hotspot/share/oops/compressedKlass.cpp index b32d10c74d2..ca1c46d4095 100644 --- a/src/hotspot/share/oops/compressedKlass.cpp +++ b/src/hotspot/share/oops/compressedKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,7 +95,7 @@ void CompressedKlassPointers::sanity_check_after_initialization() { // We should need a class space if address space is larger than what narrowKlass can address const bool should_need_class_space = (BytesPerWord * BitsPerByte) > narrow_klass_pointer_bits(); - ASSERT_HERE(should_need_class_space == needs_class_space()); + ASSERT_HERE(should_need_class_space == (INCLUDE_CLASS_SPACE ? 
true : false)); const size_t klass_align = klass_alignment_in_bytes(); @@ -318,24 +318,19 @@ void CompressedKlassPointers::initialize(address addr, size_t len) { } void CompressedKlassPointers::print_mode(outputStream* st) { - st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d", - UseCompressedClassPointers, UseCompactObjectHeaders); - if (UseCompressedClassPointers) { - st->print_cr("Narrow klass pointer bits %d, Max shift %d", - _narrow_klass_pointer_bits, _max_shift); - st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", - p2i(base()), shift()); - st->print_cr("Encoding Range: " RANGE2FMT, RANGE2FMTARGS(_base, encoding_range_end())); - st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end)); - st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1, - _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id); - if (_protection_zone_size > 0) { - st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size)); - } else { - st->print_cr("No protection zone."); - } + st->print_cr("UseCompactObjectHeaders %d", UseCompactObjectHeaders); + st->print_cr("Narrow klass pointer bits %d, Max shift %d", + _narrow_klass_pointer_bits, _max_shift); + st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", + p2i(base()), shift()); + st->print_cr("Encoding Range: " RANGE2FMT, RANGE2FMTARGS(_base, encoding_range_end())); + st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end)); + st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1, + _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id); + if (_protection_zone_size > 0) { + st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size)); } else { - st->print_cr("UseCompressedClassPointers off"); + 
st->print_cr("No protection zone."); } } diff --git a/src/hotspot/share/oops/compressedKlass.hpp b/src/hotspot/share/oops/compressedKlass.hpp index 64b9fcf9c82..fe1ce9e07ae 100644 --- a/src/hotspot/share/oops/compressedKlass.hpp +++ b/src/hotspot/share/oops/compressedKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,7 +98,6 @@ class Klass; // If compressed klass pointers then use narrowKlass. typedef juint narrowKlass; -// For UseCompressedClassPointers. class CompressedKlassPointers : public AllStatic { friend class VMStructs; friend class ArchiveBuilder; @@ -161,7 +160,6 @@ public: // Initialization sequence: // 1) Parse arguments. The following arguments take a role: - // - UseCompressedClassPointers // - UseCompactObjectHeaders // - Xshare on off dump // - CompressedClassSpaceSize @@ -192,12 +190,6 @@ public: // resulting from the current encoding settings (base, shift), capped to a certain max. value. static size_t max_klass_range_size(); - // On 64-bit, we need the class space to confine Klass structures to the encoding range, which is determined - // by bit size of narrowKlass IDs and the shift. On 32-bit, we support compressed class pointer only - // "pro-forma": narrowKlass have the same size as addresses (32 bits), and therefore the encoding range is - // equal to the address space size. Here, we don't need a class space. - static constexpr bool needs_class_space() { return LP64_ONLY(true) NOT_LP64(false); } - // Reserve a range of memory that is to contain Klass strucutures which are referenced by narrow Klass IDs. // If optimize_for_zero_base is true, the implementation will attempt to reserve optimized for zero-based encoding. 
static char* reserve_address_space_for_compressed_classes(size_t size, bool aslr, bool optimize_for_zero_base); @@ -231,7 +223,7 @@ public: // Returns the alignment a Klass* is guaranteed to have. // Note: *Not* the same as 1 << shift ! Klass are always guaranteed to be at least 64-bit aligned, // so this will return 8 even if shift is 0. - static int klass_alignment_in_bytes() { return nth_bit(MAX2(3, _shift)); } + static int klass_alignment_in_bytes() { return static_cast(nth_bit(MAX2(3, _shift))); } static int klass_alignment_in_words() { return klass_alignment_in_bytes() / BytesPerWord; } // Returns the highest possible narrowKlass value given the current Klass range diff --git a/src/hotspot/share/oops/compressedKlass.inline.hpp b/src/hotspot/share/oops/compressedKlass.inline.hpp index 65732b3b289..834264286bc 100644 --- a/src/hotspot/share/oops/compressedKlass.inline.hpp +++ b/src/hotspot/share/oops/compressedKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,6 @@ inline narrowKlass CompressedKlassPointers::encode(const Klass* v) { #ifdef ASSERT inline void CompressedKlassPointers::check_encodable(const void* addr) { - assert(UseCompressedClassPointers, "Only call for +UseCCP"); assert(addr != nullptr, "Null Klass?"); assert(is_encodable(addr), "Address " PTR_FORMAT " is not encodable (Klass range: " RANGEFMT ", klass alignment: %d)", @@ -84,7 +83,6 @@ inline void CompressedKlassPointers::check_encodable(const void* addr) { inline void CompressedKlassPointers::check_valid_narrow_klass_id(narrowKlass nk) { check_init(_base); - assert(UseCompressedClassPointers, "Only call for +UseCCP"); assert(nk > 0, "narrow Klass ID is 0"); const uint64_t nk_mask = ~right_n_bits(narrow_klass_pointer_bits()); assert(((uint64_t)nk & nk_mask) == 0, "narrow klass id bit spillover (%u)", nk); diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp index 6c519945f4d..b4cff2bbbe6 100644 --- a/src/hotspot/share/oops/constantPool.hpp +++ b/src/hotspot/share/oops/constantPool.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -554,8 +554,8 @@ class ConstantPool : public Metadata { u2 bootstrap_argument_index_at(int cp_index, int j) { int bsmai = bootstrap_methods_attribute_index(cp_index); BSMAttributeEntry* bsme = bsm_attribute_entry(bsmai); - assert((uint)j < (uint)bsme->argument_count(), "oob"); - return bsm_attribute_entry(bsmai)->argument(j); + assert(j < bsme->argument_count(), "oob"); + return bsm_attribute_entry(bsmai)->argument(checked_cast(j)); } // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve, diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp index 34d7aa10299..edb5f6714c0 100644 --- a/src/hotspot/share/oops/cpCache.cpp +++ b/src/hotspot/share/oops/cpCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -568,6 +568,8 @@ bool ConstantPoolCache::can_archive_resolved_method(ConstantPool* src_cp, Resolv return false; } + int cp_index = method_entry->constant_pool_index(); + if (!method_entry->is_resolved(Bytecodes::_invokevirtual)) { if (method_entry->method() == nullptr) { rejection_reason = "(method entry is not resolved)"; @@ -577,9 +579,24 @@ bool ConstantPoolCache::can_archive_resolved_method(ConstantPool* src_cp, Resolv rejection_reason = "(corresponding stub is generated on demand during method resolution)"; return false; // FIXME: corresponding stub is generated on demand during method resolution (see LinkResolver::resolve_static_call). 
} - if (method_entry->is_resolved(Bytecodes::_invokehandle) && !CDSConfig::is_dumping_method_handles()) { - rejection_reason = "(not dumping method handles)"; - return false; + if (method_entry->is_resolved(Bytecodes::_invokehandle)) { + if (!CDSConfig::is_dumping_method_handles()) { + rejection_reason = "(not dumping method handles)"; + return false; + } + + Symbol* sig = constant_pool()->uncached_signature_ref_at(cp_index); + Klass* k; + if (!AOTConstantPoolResolver::check_methodtype_signature(constant_pool(), sig, &k, true)) { + // invokehandles that were resolved in the training run should have been filtered in + // AOTConstantPoolResolver::maybe_resolve_fmi_ref so we shouldn't come to here. + // + // If we come here it's because the AOT assembly phase has executed an invokehandle + // that uses an excluded type like jdk.jfr.Event. This should not happen because the + // AOT assembly phase should execute only a very limited set of Java code. + ResourceMark rm; + fatal("AOT assembly phase must not resolve any invokehandles whose signatures include an excluded type"); + } } if (method_entry->method()->is_method_handle_intrinsic() && !CDSConfig::is_dumping_method_handles()) { rejection_reason = "(not dumping intrinsic method handles)"; @@ -587,7 +604,6 @@ bool ConstantPoolCache::can_archive_resolved_method(ConstantPool* src_cp, Resolv } } - int cp_index = method_entry->constant_pool_index(); assert(src_cp->tag_at(cp_index).is_method() || src_cp->tag_at(cp_index).is_interface_method(), "sanity"); if (!AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index)) { diff --git a/src/hotspot/share/oops/fieldInfo.hpp b/src/hotspot/share/oops/fieldInfo.hpp index b6d9c4d34e5..88c982e9d1f 100644 --- a/src/hotspot/share/oops/fieldInfo.hpp +++ b/src/hotspot/share/oops/fieldInfo.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -302,7 +302,7 @@ class FieldStatus { // boilerplate: u1 _flags; - static constexpr u1 flag_mask(FieldStatusBitPosition pos) { return (u1)1 << (int)pos; } + static constexpr u1 flag_mask(FieldStatusBitPosition pos) { return checked_cast(1 << pos); } bool test_flag(FieldStatusBitPosition pos) { return (_flags & flag_mask(pos)) != 0; } // this performs an atomic update on a live status byte! void update_flag(FieldStatusBitPosition pos, bool z); diff --git a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp index 97d8bf3d526..56f3e7e0325 100644 --- a/src/hotspot/share/oops/generateOopMap.cpp +++ b/src/hotspot/share/oops/generateOopMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -391,7 +391,6 @@ void CellTypeState::print(outputStream *os) { // void GenerateOopMap::initialize_bb() { - _gc_points = 0; _bb_count = 0; _bb_hdr_bits.reinitialize(method()->code_size()); } @@ -401,15 +400,13 @@ void GenerateOopMap::bb_mark_fct(GenerateOopMap *c, int bci, int *data) { if (c->is_bb_header(bci)) return; - if (TraceNewOopMapGeneration) { - tty->print_cr("Basicblock#%d begins at: %d", c->_bb_count, bci); - } + log_debug(generateoopmap)( "Basicblock#%d begins at: %d", c->_bb_count, bci); c->set_bbmark_bit(bci); c->_bb_count++; } -void GenerateOopMap::mark_bbheaders_and_count_gc_points() { +void GenerateOopMap::mark_bbheaders() { initialize_bb(); bool fellThrough = false; // False to get first BB marked. 
@@ -445,9 +442,6 @@ void GenerateOopMap::mark_bbheaders_and_count_gc_points() { default: break; } - - if (possible_gc_point(&bcs)) - _gc_points++; } } @@ -917,13 +911,12 @@ void GenerateOopMap::do_interpretation() // iterated more than once. int i = 0; do { -#ifndef PRODUCT - if (TraceNewOopMapGeneration) { - tty->print("\n\nIteration #%d of do_interpretation loop, method:\n", i); - method()->print_name(tty); - tty->print("\n\n"); + if (log_is_enabled(Trace, generateoopmap)) { + LogStream st(Log(generateoopmap)::trace()); + st.print("Iteration #%d of do_interpretation loop, method:", i); + method()->print_name(&st); + st.print("\n\n"); } -#endif _conflict = false; _monitor_safe = true; // init_state is now called from init_basic_blocks. The length of a @@ -1088,8 +1081,7 @@ void GenerateOopMap::initialize_vars() { void GenerateOopMap::add_to_ref_init_set(int localNo) { - if (TraceNewOopMapGeneration) - tty->print_cr("Added init vars: %d", localNo); + log_debug(generateoopmap)("Added init vars: %d", localNo); // Is it already in the set? if (_init_vars->contains(localNo) ) @@ -1176,42 +1168,46 @@ void GenerateOopMap::interp_bb(BasicBlock *bb) { } void GenerateOopMap::do_exception_edge(BytecodeStream* itr) { - // Only check exception edge, if bytecode can trap - if (!Bytecodes::can_trap(itr->code())) return; - switch (itr->code()) { - case Bytecodes::_aload_0: - // These bytecodes can trap for rewriting. We need to assume that - // they do not throw exceptions to make the monitor analysis work. - return; - case Bytecodes::_ireturn: - case Bytecodes::_lreturn: - case Bytecodes::_freturn: - case Bytecodes::_dreturn: - case Bytecodes::_areturn: - case Bytecodes::_return: - // If the monitor stack height is not zero when we leave the method, - // then we are either exiting with a non-empty stack or we have - // found monitor trouble earlier in our analysis. In either case, - // assume an exception could be taken here. 
- if (_monitor_top == 0) { + // Only check exception edge, if bytecode can trap or if async exceptions can be thrown + // from any bytecode in the interpreter when single stepping. + if (!_all_exception_edges) { + if (!Bytecodes::can_trap(itr->code())) return; + switch (itr->code()) { + case Bytecodes::_aload_0: + // These bytecodes can trap for rewriting. We need to assume that + // they do not throw exceptions to make the monitor analysis work. return; - } - break; - case Bytecodes::_monitorexit: - // If the monitor stack height is bad_monitors, then we have detected a - // monitor matching problem earlier in the analysis. If the - // monitor stack height is 0, we are about to pop a monitor - // off of an empty stack. In either case, the bytecode - // could throw an exception. - if (_monitor_top != bad_monitors && _monitor_top != 0) { - return; - } - break; + case Bytecodes::_ireturn: + case Bytecodes::_lreturn: + case Bytecodes::_freturn: + case Bytecodes::_dreturn: + case Bytecodes::_areturn: + case Bytecodes::_return: + // If the monitor stack height is not zero when we leave the method, + // then we are either exiting with a non-empty stack or we have + // found monitor trouble earlier in our analysis. In either case, + // assume an exception could be taken here. + if (_monitor_top == 0) { + return; + } + break; - default: - break; + case Bytecodes::_monitorexit: + // If the monitor stack height is bad_monitors, then we have detected a + // monitor matching problem earlier in the analysis. If the + // monitor stack height is 0, we are about to pop a monitor + // off of an empty stack. In either case, the bytecode + // could throw an exception. 
+ if (_monitor_top != bad_monitors && _monitor_top != 0) { + return; + } + break; + + default: + break; + } } if (_has_exceptions) { @@ -1288,13 +1284,13 @@ void GenerateOopMap::report_monitor_mismatch(const char *msg) { void GenerateOopMap::print_states(outputStream *os, CellTypeState* vec, int num) { for (int i = 0; i < num; i++) { - vec[i].print(tty); + vec[i].print(os); } } // Print the state values at the current bytecode. -void GenerateOopMap::print_current_state(outputStream *os, - BytecodeStream *currentBC, +void GenerateOopMap::print_current_state(outputStream* os, + BytecodeStream* currentBC, bool detailed) { if (detailed) { os->print(" %4d vars = ", currentBC->bci()); @@ -1316,6 +1312,7 @@ void GenerateOopMap::print_current_state(outputStream *os, case Bytecodes::_invokestatic: case Bytecodes::_invokedynamic: case Bytecodes::_invokeinterface: { + ResourceMark rm; int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2(); ConstantPool* cp = method()->constants(); int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx, currentBC->code()); @@ -1346,8 +1343,9 @@ void GenerateOopMap::print_current_state(outputStream *os, // Sets the current state to be the state after executing the // current instruction, starting in the current state. void GenerateOopMap::interp1(BytecodeStream *itr) { - if (TraceNewOopMapGeneration) { - print_current_state(tty, itr, TraceNewOopMapGenerationDetailed); + if (log_is_enabled(Trace, generateoopmap)) { + LogStream st(Log(generateoopmap)::trace()); + print_current_state(&st, itr, Verbose); } // Should we report the results? Result is reported *before* the instruction at the current bci is executed. @@ -2030,9 +2028,7 @@ void GenerateOopMap::ret_jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, i DEBUG_ONLY(BasicBlock* target_bb = &jsr_bb[1];) assert(target_bb == get_basic_block_at(target_bci), "wrong calc. 
of successor basicblock"); bool alive = jsr_bb->is_alive(); - if (TraceNewOopMapGeneration) { - tty->print("pc = %d, ret -> %d alive: %s\n", bci, target_bci, alive ? "true" : "false"); - } + log_debug(generateoopmap)("pc = %d, ret -> %d alive: %s", bci, target_bci, alive ? "true" : "false"); if (alive) jmpFct(this, target_bci, data); } } @@ -2050,6 +2046,7 @@ char* GenerateOopMap::state_vec_to_string(CellTypeState* vec, int len) { return _state_vec_buf; } +#ifndef PRODUCT void GenerateOopMap::print_time() { tty->print_cr ("Accumulated oopmap times:"); tty->print_cr ("---------------------------"); @@ -2057,36 +2054,26 @@ void GenerateOopMap::print_time() { tty->print_cr (" (%3.0f bytecodes per sec) ", (double)GenerateOopMap::_total_byte_count / GenerateOopMap::_total_oopmap_time.seconds()); } +#endif // // ============ Main Entry Point =========== // -GenerateOopMap::GenerateOopMap(const methodHandle& method) { +GenerateOopMap::GenerateOopMap(const methodHandle& method, bool all_exception_edges) : // We have to initialize all variables here, that can be queried directly - _method = method; - _max_locals=0; - _init_vars = nullptr; - -#ifndef PRODUCT - // If we are doing a detailed trace, include the regular trace information. 
- if (TraceNewOopMapGenerationDetailed) { - TraceNewOopMapGeneration = true; - } -#endif -} + _method(method), + _max_locals(0), + _all_exception_edges(all_exception_edges), + _init_vars(nullptr) {} bool GenerateOopMap::compute_map(Thread* current) { #ifndef PRODUCT - if (TimeOopMap2) { - method()->print_short_name(tty); - tty->print(" "); - } if (TimeOopMap) { _total_byte_count += method()->code_size(); + TraceTime t_all(nullptr, &_total_oopmap_time, TimeOopMap); } #endif - TraceTime t_single("oopmap time", TimeOopMap2); - TraceTime t_all(nullptr, &_total_oopmap_time, TimeOopMap); + TraceTime t_single("oopmap time", TRACETIME_LOG(Debug, generateoopmap)); // Initialize values _got_error = false; @@ -2103,24 +2090,22 @@ bool GenerateOopMap::compute_map(Thread* current) { _did_rewriting = false; _did_relocation = false; - if (TraceNewOopMapGeneration) { - tty->print("Method name: %s\n", method()->name()->as_C_string()); - if (Verbose) { - _method->print_codes(); - tty->print_cr("Exception table:"); - ExceptionTable excps(method()); - for(int i = 0; i < excps.length(); i ++) { - tty->print_cr("[%d - %d] -> %d", - excps.start_pc(i), excps.end_pc(i), excps.handler_pc(i)); - } + if (log_is_enabled(Debug, generateoopmap)) { + ResourceMark rm; + LogStream st(Log(generateoopmap)::debug()); + st.print_cr("Method name: %s\n", method()->name()->as_C_string()); + _method->print_codes_on(&st); + st.print_cr("Exception table:"); + ExceptionTable excps(method()); + for (int i = 0; i < excps.length(); i ++) { + st.print_cr("[%d - %d] -> %d", + excps.start_pc(i), excps.end_pc(i), excps.handler_pc(i)); } } // if no code - do nothing // compiler needs info if (method()->code_size() == 0 || _max_locals + method()->max_stack() == 0) { - fill_stackmap_prolog(0); - fill_stackmap_epilog(); return true; } // Step 1: Compute all jump targets and their return value @@ -2129,7 +2114,7 @@ bool GenerateOopMap::compute_map(Thread* current) { // Step 2: Find all basic blocks and count GC points if 
(!_got_error) - mark_bbheaders_and_count_gc_points(); + mark_bbheaders(); // Step 3: Calculate stack maps if (!_got_error) @@ -2181,14 +2166,11 @@ void GenerateOopMap::verify_error(const char *format, ...) { // void GenerateOopMap::report_result() { - if (TraceNewOopMapGeneration) tty->print_cr("Report result pass"); + log_debug(generateoopmap)("Report result pass"); // We now want to report the result of the parse _report_result = true; - // Prolog code - fill_stackmap_prolog(_gc_points); - // Mark everything changed, then do one interpretation pass. for (int i = 0; i<_bb_count; i++) { if (_basic_blocks[i].is_reachable()) { @@ -2197,19 +2179,11 @@ void GenerateOopMap::report_result() { } } - // Note: Since we are skipping dead-code when we are reporting results, then - // the no. of encountered gc-points might be fewer than the previously number - // we have counted. (dead-code is a pain - it should be removed before we get here) - fill_stackmap_epilog(); - - // Report initvars - fill_init_vars(_init_vars); - _report_result = false; } void GenerateOopMap::result_for_basicblock(int bci) { - if (TraceNewOopMapGeneration) tty->print_cr("Report result pass for basicblock"); + log_debug(generateoopmap)("Report result pass for basicblock"); // We now want to report the result of the parse _report_result = true; @@ -2217,7 +2191,7 @@ void GenerateOopMap::result_for_basicblock(int bci) { // Find basicblock and report results BasicBlock* bb = get_basic_block_containing(bci); guarantee(bb != nullptr, "no basic block for bci"); - assert(bb->is_reachable(), "getting result from unreachable basicblock"); + assert(bb->is_reachable(), "getting result from unreachable basicblock at bci %d", bci); bb->set_changed(true); interp_bb(bb); } @@ -2229,9 +2203,7 @@ void GenerateOopMap::result_for_basicblock(int bci) { void GenerateOopMap::record_refval_conflict(int varNo) { assert(varNo>=0 && varNo< _max_locals, "index out of range"); - if (TraceOopMapRewrites) { - tty->print("### 
Conflict detected (local no: %d)\n", varNo); - } + log_trace(generateoopmap)("### Conflict detected (local no: %d)", varNo); if (!_new_var_map) { _new_var_map = NEW_RESOURCE_ARRAY(int, _max_locals); @@ -2270,10 +2242,12 @@ void GenerateOopMap::rewrite_refval_conflicts() // Tracing flag _did_rewriting = true; - if (TraceOopMapRewrites) { - tty->print_cr("ref/value conflict for method %s - bytecodes are getting rewritten", method()->name()->as_C_string()); - method()->print(); - method()->print_codes(); + if (log_is_enabled(Trace, generateoopmap)) { + ResourceMark rm; + LogStream st(Log(generateoopmap)::trace()); + st.print_cr("ref/value conflict for method %s - bytecodes are getting rewritten", method()->name()->as_C_string()); + method()->print_on(&st); + method()->print_codes_on(&st); } assert(_new_var_map!=nullptr, "nothing to rewrite"); @@ -2283,9 +2257,7 @@ void GenerateOopMap::rewrite_refval_conflicts() if (!_got_error) { for (int k = 0; k < _max_locals && !_got_error; k++) { if (_new_var_map[k] != k) { - if (TraceOopMapRewrites) { - tty->print_cr("Rewriting: %d -> %d", k, _new_var_map[k]); - } + log_trace(generateoopmap)("Rewriting: %d -> %d", k, _new_var_map[k]); rewrite_refval_conflict(k, _new_var_map[k]); if (_got_error) return; nof_conflicts++; @@ -2336,22 +2308,16 @@ bool GenerateOopMap::rewrite_refval_conflict_inst(BytecodeStream *itr, int from, int bci = itr->bci(); if (is_aload(itr, &index) && index == from) { - if (TraceOopMapRewrites) { - tty->print_cr("Rewriting aload at bci: %d", bci); - } + log_trace(generateoopmap)("Rewriting aload at bci: %d", bci); return rewrite_load_or_store(itr, Bytecodes::_aload, Bytecodes::_aload_0, to); } if (is_astore(itr, &index) && index == from) { if (!stack_top_holds_ret_addr(bci)) { - if (TraceOopMapRewrites) { - tty->print_cr("Rewriting astore at bci: %d", bci); - } + log_trace(generateoopmap)("Rewriting astore at bci: %d", bci); return rewrite_load_or_store(itr, Bytecodes::_astore, Bytecodes::_astore_0, to); } 
else { - if (TraceOopMapRewrites) { - tty->print_cr("Suppress rewriting of astore at bci: %d", bci); - } + log_trace(generateoopmap)("Suppress rewriting of astore at bci: %d", bci); } } @@ -2519,9 +2485,7 @@ void GenerateOopMap::compute_ret_adr_at_TOS() { // TDT: should this be is_good_address() ? if (_stack_top > 0 && stack()[_stack_top-1].is_address()) { _ret_adr_tos->append(bcs.bci()); - if (TraceNewOopMapGeneration) { - tty->print_cr("Ret_adr TOS at bci: %d", bcs.bci()); - } + log_debug(generateoopmap)("Ret_adr TOS at bci: %d", bcs.bci()); } interp1(&bcs); } diff --git a/src/hotspot/share/oops/generateOopMap.hpp b/src/hotspot/share/oops/generateOopMap.hpp index 0da3779d463..f0fdfeda57f 100644 --- a/src/hotspot/share/oops/generateOopMap.hpp +++ b/src/hotspot/share/oops/generateOopMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -307,6 +307,7 @@ class GenerateOopMap { bool _did_relocation; // was relocation necessary bool _monitor_safe; // The monitors in this method have been determined // to be safe. + bool _all_exception_edges; // All bytecodes can reach containing exception handler. 
// Working Cell type state int _state_len; // Size of states @@ -348,17 +349,15 @@ class GenerateOopMap { // Basicblock info BasicBlock * _basic_blocks; // Array of basicblock info - int _gc_points; int _bb_count; ResourceBitMap _bb_hdr_bits; // Basicblocks methods void initialize_bb (); - void mark_bbheaders_and_count_gc_points(); + void mark_bbheaders(); bool is_bb_header (int bci) const { return _bb_hdr_bits.at(bci); } - int gc_points () const { return _gc_points; } int bb_count () const { return _bb_count; } void set_bbmark_bit (int bci); BasicBlock * get_basic_block_at (int bci) const; @@ -450,14 +449,14 @@ class GenerateOopMap { int binsToHold (int no) { return ((no+(BitsPerWord-1))/BitsPerWord); } char *state_vec_to_string (CellTypeState* vec, int len); - // Helper method. Can be used in subclasses to fx. calculate gc_points. If the current instruction + // Helper method. If the current instruction // is a control transfer, then calls the jmpFct all possible destinations. void ret_jump_targets_do (BytecodeStream *bcs, jmpFct_t jmpFct, int varNo,int *data); bool jump_targets_do (BytecodeStream *bcs, jmpFct_t jmpFct, int *data); friend class RelocCallback; public: - GenerateOopMap(const methodHandle& method); + GenerateOopMap(const methodHandle& method, bool all_exception_edges); // Compute the map - returns true on success and false on error. bool compute_map(Thread* current); @@ -480,14 +479,7 @@ class GenerateOopMap { bool monitor_safe() { return _monitor_safe; } // Specialization methods. Intended use: - // - possible_gc_point must return true for every bci for which the stackmaps must be returned - // - fill_stackmap_prolog is called just before the result is reported. The arguments tells the estimated - // number of gc points // - fill_stackmap_for_opcodes is called once for each bytecode index in order (0...code_length-1) - // - fill_stackmap_epilog is called after all results has been reported. 
Note: Since the algorithm does not report - // stackmaps for deadcode, fewer gc_points might have been encountered than assumed during the epilog. It is the - // responsibility of the subclass to count the correct number. - // - fill_init_vars are called once with the result of the init_vars computation // // All these methods are used during a call to: compute_map. Note: Non of the return results are valid // after compute_map returns, since all values are allocated as resource objects. @@ -496,14 +488,10 @@ class GenerateOopMap { virtual bool allow_rewrites () const { return false; } virtual bool report_results () const { return true; } virtual bool report_init_vars () const { return true; } - virtual bool possible_gc_point (BytecodeStream *bcs) { ShouldNotReachHere(); return false; } - virtual void fill_stackmap_prolog (int nof_gc_points) { ShouldNotReachHere(); } - virtual void fill_stackmap_epilog () { ShouldNotReachHere(); } virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs, CellTypeState* vars, CellTypeState* stack, int stackTop) { ShouldNotReachHere(); } - virtual void fill_init_vars (GrowableArray *init_vars) { ShouldNotReachHere();; } }; // @@ -513,19 +501,13 @@ class GenerateOopMap { class ResolveOopMapConflicts: public GenerateOopMap { private: - bool _must_clear_locals; - virtual bool report_results() const { return false; } virtual bool report_init_vars() const { return true; } virtual bool allow_rewrites() const { return true; } - virtual bool possible_gc_point (BytecodeStream *bcs) { return false; } - virtual void fill_stackmap_prolog (int nof_gc_points) {} - virtual void fill_stackmap_epilog () {} virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs, CellTypeState* vars, CellTypeState* stack, int stack_top) {} - virtual void fill_init_vars (GrowableArray *init_vars) { _must_clear_locals = init_vars->length() > 0; } #ifndef PRODUCT // Statistics @@ -535,10 +517,8 @@ class ResolveOopMapConflicts: public GenerateOopMap { #endif 
public: - ResolveOopMapConflicts(const methodHandle& method) : GenerateOopMap(method) { _must_clear_locals = false; }; - + ResolveOopMapConflicts(const methodHandle& method) : GenerateOopMap(method, true) { } methodHandle do_potential_rewrite(TRAPS); - bool must_clear_locals() const { return _must_clear_locals; } }; @@ -551,16 +531,12 @@ class GeneratePairingInfo: public GenerateOopMap { virtual bool report_results() const { return false; } virtual bool report_init_vars() const { return false; } virtual bool allow_rewrites() const { return false; } - virtual bool possible_gc_point (BytecodeStream *bcs) { return false; } - virtual void fill_stackmap_prolog (int nof_gc_points) {} - virtual void fill_stackmap_epilog () {} virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs, CellTypeState* vars, CellTypeState* stack, int stack_top) {} - virtual void fill_init_vars (GrowableArray *init_vars) {} public: - GeneratePairingInfo(const methodHandle& method) : GenerateOopMap(method) {}; + GeneratePairingInfo(const methodHandle& method) : GenerateOopMap(method, false) {}; // Call compute_map() to generate info. 
}; diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 1963327fc78..d675e61cc05 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -23,6 +23,7 @@ */ #include "cds/aotClassInitializer.hpp" +#include "cds/aotLinkedClassBulkLoader.hpp" #include "cds/aotMetaspace.hpp" #include "cds/archiveUtils.hpp" #include "cds/cdsConfig.hpp" @@ -150,6 +151,7 @@ #endif // ndef DTRACE_ENABLED bool InstanceKlass::_finalization_enabled = true; +static int call_class_initializer_counter = 0; // for debugging static inline bool is_class_loader(const Symbol* class_name, const ClassFileParser& parser) { @@ -484,10 +486,8 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par ik = new (loader_data, size, THREAD) InstanceKlass(parser); } - if (ik != nullptr && UseCompressedClassPointers) { - assert(CompressedKlassPointers::is_encodable(ik), - "Klass " PTR_FORMAT "needs a narrow Klass ID, but is not encodable", p2i(ik)); - } + assert(ik == nullptr || CompressedKlassPointers::is_encodable(ik), + "Klass " PTR_FORMAT "needs a narrow Klass ID, but is not encodable", p2i(ik)); // Check for pending exception before adding to the loader data and incrementing // class count. Can get OOM here. 
@@ -704,6 +704,7 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { if (constants() != nullptr) { assert (!constants()->on_stack(), "shouldn't be called if anything is onstack"); if (!constants()->in_aot_cache()) { + HeapShared::remove_scratch_resolved_references(constants()); MetadataFactory::free_metadata(loader_data, constants()); } // Delete any cached resolution errors for the constant pool @@ -884,7 +885,9 @@ void InstanceKlass::assert_no_clinit_will_run_for_aot_initialized_class() const #endif #if INCLUDE_CDS -void InstanceKlass::initialize_with_aot_initialized_mirror(TRAPS) { +// early_init -- we are moving this class into the fully_initialized state before the +// JVM is able to execute any bytecodes. See AOTLinkedClassBulkLoader::is_initializing_classes_early(). +void InstanceKlass::initialize_with_aot_initialized_mirror(bool early_init, TRAPS) { assert(has_aot_initialized_mirror(), "must be"); assert(CDSConfig::is_loading_heap(), "must be"); assert(CDSConfig::is_using_aot_linked_classes(), "must be"); @@ -894,15 +897,36 @@ void InstanceKlass::initialize_with_aot_initialized_mirror(TRAPS) { return; } + if (log_is_enabled(Info, aot, init)) { + ResourceMark rm; + log_info(aot, init)("%s (aot-inited%s)", external_name(), early_init ? 
", early" : ""); + } + if (is_runtime_setup_required()) { + assert(!early_init, "must not call"); // Need to take the slow path, which will call the runtimeSetup() function instead // of initialize(CHECK); return; } - if (log_is_enabled(Info, aot, init)) { - ResourceMark rm; - log_info(aot, init)("%s (aot-inited)", external_name()); + + LogTarget(Info, class, init) lt; + if (lt.is_enabled()) { + ResourceMark rm(THREAD); + LogStream ls(lt); + ls.print("%d Initializing ", call_class_initializer_counter++); + name()->print_value_on(&ls); + ls.print_cr("(aot-inited) (" PTR_FORMAT ") by thread \"%s\"", + p2i(this), THREAD->name()); + } + + if (early_init) { + precond(AOTLinkedClassBulkLoader::is_initializing_classes_early()); + precond(is_linked()); + precond(init_thread() == nullptr); + set_init_state(fully_initialized); + fence_and_clear_init_lock(); + return; } link_class(CHECK); @@ -1098,6 +1122,12 @@ bool InstanceKlass::link_class_impl(TRAPS) { } } } + + if (log_is_enabled(Info, class, link)) { + ResourceMark rm(THREAD); + log_info(class, link)("Linked class %s", external_name()); + } + return true; } @@ -1699,8 +1729,6 @@ ArrayKlass* InstanceKlass::array_klass_or_null() { return array_klass_or_null(1); } -static int call_class_initializer_counter = 0; // for debugging - Method* InstanceKlass::class_initializer() const { Method* clinit = find_method( vmSymbols::class_initializer_name(), vmSymbols::void_method_signature()); diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index e370a3b7a7c..dd563ad3492 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -556,7 +556,7 @@ public: // initialization (virtuals from Klass) bool should_be_initialized() const override; // means that initialize should be called - void initialize_with_aot_initialized_mirror(TRAPS); + void initialize_with_aot_initialized_mirror(bool early_init, TRAPS); void assert_no_clinit_will_run_for_aot_initialized_class() const NOT_DEBUG_RETURN; void initialize(TRAPS) override; void initialize_preemptable(TRAPS) override; diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 001e9eba790..84a1766a702 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1055,14 +1055,8 @@ void Klass::verify_on(outputStream* st) { // This can be expensive, but it is worth checking that this klass is actually // in the CLD graph but not in production. 
-#ifdef ASSERT - if (UseCompressedClassPointers) { - // Stricter checks for both correct alignment and placement - CompressedKlassPointers::check_encodable(this); - } else { - assert(Metaspace::contains((address)this), "Should be"); - } -#endif // ASSERT + // Stricter checks for both correct alignment and placement + DEBUG_ONLY(CompressedKlassPointers::check_encodable(this)); guarantee(this->is_klass(),"should be klass"); diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index d59db9744cb..1c6b28127b8 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -466,9 +466,9 @@ protected: static const int _lh_log2_element_size_shift = BitsPerByte*0; static const int _lh_log2_element_size_mask = BitsPerLong-1; static const int _lh_element_type_shift = BitsPerByte*1; - static const int _lh_element_type_mask = right_n_bits(BitsPerByte); // shifted mask + static const int _lh_element_type_mask = right_n_bits(BitsPerByte); // shifted mask static const int _lh_header_size_shift = BitsPerByte*2; - static const int _lh_header_size_mask = right_n_bits(BitsPerByte); // shifted mask + static const int _lh_header_size_mask = right_n_bits(BitsPerByte); // shifted mask static const int _lh_array_tag_bits = 2; static const int _lh_array_tag_shift = BitsPerInt - _lh_array_tag_bits; static const int _lh_array_tag_obj_value = ~0x01; // 0x80000000 >> 30 diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index c54a9f1bf5d..4583e6bd3a1 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -54,7 +54,6 @@ // // - the two lock bits are used to describe three states: locked/unlocked and monitor. 
// -// [ptr | 00] locked ptr points to real header on stack (stack-locking in use) // [header | 00] locked locked regular object header (fast-locking in use) // [header | 01] unlocked regular object header // [ptr | 10] monitor inflated lock (header is swapped out, UseObjectMonitorTable == false) diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index 38bdc33c628..dc0b8fa9f81 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -329,7 +329,7 @@ static bool is_excluded(Klass* k) { log_debug(aot, training)("Purged %s from MDO: unloaded class", k->name()->as_C_string()); return true; } else { - bool excluded = SystemDictionaryShared::should_be_excluded(k); + bool excluded = SystemDictionaryShared::should_be_excluded(k) || !SystemDictionaryShared::is_builtin_loader(k->class_loader_data()); if (excluded) { log_debug(aot, training)("Purged %s from MDO: excluded class", k->name()->as_C_string()); } @@ -667,8 +667,8 @@ void MultiBranchData::print_data_on(outputStream* st, const char* extra) const { void ArgInfoData::print_data_on(outputStream* st, const char* extra) const { print_shared(st, "ArgInfoData", extra); - int nargs = number_of_args(); - for (int i = 0; i < nargs; i++) { + int args_size = size_of_args(); + for (int i = 0; i < args_size; i++) { st->print(" 0x%x", arg_modified(i)); } st->cr(); diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index 196537359b5..45529618afb 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -1751,7 +1751,7 @@ public: virtual bool is_ArgInfoData() const { return true; } - int number_of_args() const { + int size_of_args() const { return array_len(); } diff --git a/src/hotspot/share/oops/methodData.inline.hpp b/src/hotspot/share/oops/methodData.inline.hpp index dee14d49253..b417ba867fc 100644 --- a/src/hotspot/share/oops/methodData.inline.hpp +++ 
b/src/hotspot/share/oops/methodData.inline.hpp @@ -59,7 +59,7 @@ inline uint MethodData::arg_modified(int a) { MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag); ArgInfoData* aid = arg_info(); assert(aid != nullptr, "arg_info must be not null"); - assert(a >= 0 && a < aid->number_of_args(), "valid argument number"); + assert(a >= 0 && a < aid->size_of_args(), "valid argument number"); return aid->arg_modified(a); } @@ -68,7 +68,7 @@ inline void MethodData::set_arg_modified(int a, uint v) { MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag); ArgInfoData* aid = arg_info(); assert(aid != nullptr, "arg_info must be not null"); - assert(a >= 0 && a < aid->number_of_args(), "valid argument number"); + assert(a >= 0 && a < aid->size_of_args(), "valid argument number"); aid->set_arg_modified(a, v); } diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp index 2bbe898adbd..fccd02f14e6 100644 --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -120,8 +120,7 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da return oak; } -ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayKlass(name, Kind) { - set_dimension(n); +ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayKlass(n, name, Kind) { set_element_klass(element_klass); Klass* bk; diff --git a/src/hotspot/share/oops/objLayout.cpp b/src/hotspot/share/oops/objLayout.cpp index b8cd8249da1..2c426a7ddff 100644 --- a/src/hotspot/share/oops/objLayout.cpp +++ b/src/hotspot/share/oops/objLayout.cpp @@ -38,21 +38,17 @@ void ObjLayout::initialize() { _klass_mode = Compact; _oop_base_offset_in_bytes = sizeof(markWord); _oop_has_klass_gap = false; - } else if (UseCompressedClassPointers) { + } else { _klass_mode = Compressed; _oop_base_offset_in_bytes = sizeof(markWord) + sizeof(narrowKlass); _oop_has_klass_gap = true; - } else { - 
_klass_mode = Uncompressed; - _oop_base_offset_in_bytes = sizeof(markWord) + sizeof(Klass*); - _oop_has_klass_gap = false; } #else assert(_klass_mode == Undefined, "ObjLayout initialized twice"); assert(!UseCompactObjectHeaders, "COH unsupported on 32-bit"); - // We support +-UseCompressedClassPointers on 32-bit, but the layout + // We support narrow Klass pointers on 32-bit, but the layout // is exactly the same as it was with uncompressed klass pointers - _klass_mode = UseCompressedClassPointers ? Compressed : Uncompressed; + _klass_mode = Compressed; _oop_base_offset_in_bytes = sizeof(markWord) + sizeof(Klass*); _oop_has_klass_gap = false; #endif diff --git a/src/hotspot/share/oops/objLayout.hpp b/src/hotspot/share/oops/objLayout.hpp index e434524d4b0..37ed0b7a532 100644 --- a/src/hotspot/share/oops/objLayout.hpp +++ b/src/hotspot/share/oops/objLayout.hpp @@ -27,8 +27,8 @@ /* * This class helps to avoid loading more than one flag in some - * operations that require checking UseCompressedClassPointers, - * UseCompactObjectHeaders and possibly more. + * operations that require checking UseCompactObjectHeaders and - in the future - + * possibly more. * * This is important on some performance critical paths, e.g. 
where * the Klass* is accessed frequently, especially by GC oop iterators @@ -37,12 +37,10 @@ class ObjLayout { public: enum Mode { - // +UseCompactObjectHeaders (implies +UseCompressedClassPointers) + // +UseCompactObjectHeaders Compact, - // +UseCompressedClassPointers (-UseCompactObjectHeaders) + // -UseCompactObjectHeaders (compressed Klass pointers) Compressed, - // -UseCompressedClassPointers (-UseCompactObjectHeaders) - Uncompressed, // Not yet initialized Undefined }; diff --git a/src/hotspot/share/oops/objLayout.inline.hpp b/src/hotspot/share/oops/objLayout.inline.hpp index 6aa9e39ce28..adad490378d 100644 --- a/src/hotspot/share/oops/objLayout.inline.hpp +++ b/src/hotspot/share/oops/objLayout.inline.hpp @@ -32,10 +32,8 @@ inline ObjLayout::Mode ObjLayout::klass_mode() { assert(_klass_mode != Undefined, "KlassMode not yet initialized"); if (UseCompactObjectHeaders) { assert(_klass_mode == Compact, "Klass mode does not match flags"); - } else if (UseCompressedClassPointers) { - assert(_klass_mode == Compressed, "Klass mode does not match flags"); } else { - assert(_klass_mode == Uncompressed, "Klass mode does not match flags"); + assert(_klass_mode == Compressed, "Klass mode does not match flags"); } #endif return _klass_mode; diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index 5f453241c3d..415732af4f6 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,8 +152,7 @@ bool oopDesc::is_typeArray_noinline() const { return is_typeArray(); } #if INCLUDE_CDS_JAVA_HEAP void oopDesc::set_narrow_klass(narrowKlass nk) { assert(CDSConfig::is_dumping_heap(), "Used by CDS only. 
Do not abuse!"); - assert(UseCompressedClassPointers, "must be"); - _metadata._compressed_klass = nk; + _compressed_klass = nk; } #endif diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index 0dc6590750e..d6cc71a60d8 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,10 +49,7 @@ class oopDesc { friend class JVMCIVMStructs; private: volatile markWord _mark; - union _metadata { - Klass* _klass; - narrowKlass _compressed_klass; - } _metadata; + narrowKlass _compressed_klass; // There may be ordering constraints on the initialization of fields that // make use of the C++ copy/assign incorrect. @@ -338,7 +335,7 @@ class oopDesc { } else #endif { - return (int)offset_of(oopDesc, _metadata._klass); + return (int)offset_of(oopDesc, _compressed_klass); } } static int klass_gap_offset_in_bytes() { diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index b445eae933b..d5cb80e1122 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -99,9 +99,9 @@ Klass* oopDesc::klass() const { case ObjLayout::Compact: return mark().klass(); case ObjLayout::Compressed: - return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass); + return CompressedKlassPointers::decode_not_null(_compressed_klass); default: - return _metadata._klass; + ShouldNotReachHere(); } } @@ -110,9 +110,9 @@ Klass* oopDesc::klass_or_null() const { case ObjLayout::Compact: return mark().klass_or_null(); case ObjLayout::Compressed: - return CompressedKlassPointers::decode(_metadata._compressed_klass); + return CompressedKlassPointers::decode(_compressed_klass); default: - return _metadata._klass; + ShouldNotReachHere(); } } @@ -121,11 +121,11 @@ Klass* oopDesc::klass_or_null_acquire() const { case ObjLayout::Compact: return mark_acquire().klass(); case ObjLayout::Compressed: { - narrowKlass narrow_klass = AtomicAccess::load_acquire(&_metadata._compressed_klass); + narrowKlass narrow_klass = AtomicAccess::load_acquire(&_compressed_klass); return CompressedKlassPointers::decode(narrow_klass); } default: - return AtomicAccess::load_acquire(&_metadata._klass); + ShouldNotReachHere(); } } @@ -134,9 +134,9 @@ Klass* oopDesc::klass_without_asserts() const { case ObjLayout::Compact: return mark().klass_without_asserts(); case ObjLayout::Compressed: - return CompressedKlassPointers::decode_without_asserts(_metadata._compressed_klass); + return CompressedKlassPointers::decode_without_asserts(_compressed_klass); default: - return _metadata._klass; + ShouldNotReachHere(); } } @@ -145,7 +145,7 @@ narrowKlass oopDesc::narrow_klass() const { case ObjLayout::Compact: return mark().narrow_klass(); case ObjLayout::Compressed: - return _metadata._compressed_klass; + return _compressed_klass; default: ShouldNotReachHere(); } @@ -154,23 +154,14 @@ narrowKlass oopDesc::narrow_klass() const { void oopDesc::set_klass(Klass* k) { assert(Universe::is_bootstrapping() || (k 
!= nullptr && k->is_klass()), "incorrect Klass"); assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); - if (UseCompressedClassPointers) { - _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k); - } else { - _metadata._klass = k; - } + _compressed_klass = CompressedKlassPointers::encode_not_null(k); } void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); char* raw_mem = ((char*)mem + klass_offset_in_bytes()); - if (UseCompressedClassPointers) { - AtomicAccess::release_store((narrowKlass*)raw_mem, - CompressedKlassPointers::encode_not_null(k)); - } else { - AtomicAccess::release_store((Klass**)raw_mem, k); - } + AtomicAccess::release_store((narrowKlass*)raw_mem, CompressedKlassPointers::encode_not_null(k)); } void oopDesc::set_klass_gap(HeapWord* mem, int v) { diff --git a/src/hotspot/share/oops/resolvedFieldEntry.cpp b/src/hotspot/share/oops/resolvedFieldEntry.cpp index 49e9115ca9a..122ecf092d8 100644 --- a/src/hotspot/share/oops/resolvedFieldEntry.cpp +++ b/src/hotspot/share/oops/resolvedFieldEntry.cpp @@ -77,10 +77,13 @@ void ResolvedFieldEntry::assert_is_valid() const { "field offset out of range %d >= %d", field_offset(), instanceOopDesc::base_offset_in_bytes()); assert(as_BasicType((TosState)tos_state()) != T_ILLEGAL, "tos_state is ILLEGAL"); assert(_flags < (1 << (max_flag_shift + 1)), "flags are too large %d", _flags); - assert((get_code() == 0 || get_code() == Bytecodes::_getstatic || get_code() == Bytecodes::_getfield), - "invalid get bytecode %d", get_code()); - assert((put_code() == 0 || put_code() == Bytecodes::_putstatic || put_code() == Bytecodes::_putfield), - "invalid put bytecode %d", put_code()); + + // Read each bytecode once. 
+ volatile Bytecodes::Code g = (Bytecodes::Code)get_code(); + assert(g == 0 || g == Bytecodes::_getstatic || g == Bytecodes::_getfield, "invalid get bytecode %d", g); + + volatile Bytecodes::Code p = (Bytecodes::Code)put_code(); + assert(p == 0 || p == Bytecodes::_putstatic || p == Bytecodes::_putfield, "invalid put bytecode %d", p); } #endif diff --git a/src/hotspot/share/oops/trainingData.cpp b/src/hotspot/share/oops/trainingData.cpp index f52c22ad38a..7976da35374 100644 --- a/src/hotspot/share/oops/trainingData.cpp +++ b/src/hotspot/share/oops/trainingData.cpp @@ -118,10 +118,23 @@ void TrainingData::verify() { } } +static bool is_excluded(InstanceKlass* k) { + if (!k->is_loaded() || k->has_been_redefined()) { + return true; + } + if (CDSConfig::is_at_aot_safepoint()) { + // Check for AOT exclusion only at AOT safe point. + return SystemDictionaryShared::should_be_excluded(k) || !SystemDictionaryShared::is_builtin_loader(k->class_loader_data()); + } + return false; +} + MethodTrainingData* MethodTrainingData::make(const methodHandle& method, bool null_if_not_found, bool use_cache) { - MethodTrainingData* mtd = nullptr; if (!have_data() && !need_data()) { - return mtd; + return nullptr; + } + if (is_excluded(method->method_holder())) { + return nullptr; } // Try grabbing the cached value first. // Cache value is stored in MethodCounters and the following are the @@ -133,6 +146,7 @@ MethodTrainingData* MethodTrainingData::make(const methodHandle& method, bool nu // i.e. null_if_no_found == true, then just return a null. // 3. Cache value is not null. // Return it, the value of training_data_lookup_failed doesn't matter. 
+ MethodTrainingData* mtd = nullptr; MethodCounters* mcs = method->method_counters(); if (mcs != nullptr) { mtd = mcs->method_training_data(); @@ -175,6 +189,7 @@ MethodTrainingData* MethodTrainingData::make(const methodHandle& method, bool nu return nullptr; // allocation failure } td = training_data_set()->install(mtd); + assert(!is_excluded(method->method_holder()), "Should not be excluded"); assert(td == mtd, ""); } else { mtd = nullptr; @@ -376,6 +391,9 @@ void CompileTrainingData::prepare(Visitor& visitor) { } KlassTrainingData* KlassTrainingData::make(InstanceKlass* holder, bool null_if_not_found) { + if (is_excluded(holder)) { + return nullptr; + } Key key(holder); TrainingData* td = CDS_ONLY(have_data() ? lookup_archived_training_data(&key) :) nullptr; KlassTrainingData* ktd = nullptr; @@ -401,6 +419,7 @@ KlassTrainingData* KlassTrainingData::make(InstanceKlass* holder, bool null_if_n } td = training_data_set()->install(ktd); assert(ktd == td, ""); + assert(!is_excluded(holder), "Should not be excluded"); } else { ktd = td->as_KlassTrainingData(); guarantee(ktd->holder() != nullptr, "null holder"); @@ -543,18 +562,24 @@ void TrainingData::cleanup_training_data() { } } +void TrainingData::cleanup_after_redefinition() { + if (need_data()) { + TrainingDataLocker l; + ResourceMark rm; + Visitor visitor(training_data_set()->size()); + training_data_set()->iterate([&](TrainingData* td) { + td->cleanup(visitor); + }); + } +} + void KlassTrainingData::cleanup(Visitor& visitor) { if (visitor.is_visited(this)) { return; } visitor.visit(this); if (has_holder()) { - bool is_excluded = !holder()->is_loaded(); - if (CDSConfig::is_at_aot_safepoint()) { - // Check for AOT exclusion only at AOT safe point. 
- is_excluded |= SystemDictionaryShared::should_be_excluded(holder()); - } - if (is_excluded) { + if (is_excluded(holder())) { ResourceMark rm; log_debug(aot, training)("Cleanup KTD %s", name()->as_klass_external_name()); _holder = nullptr; @@ -572,12 +597,8 @@ void MethodTrainingData::cleanup(Visitor& visitor) { } visitor.visit(this); if (has_holder()) { - if (CDSConfig::is_at_aot_safepoint() && SystemDictionaryShared::should_be_excluded(holder()->method_holder())) { - // Check for AOT exclusion only at AOT safe point. + if (is_excluded(holder()->method_holder())) { log_debug(aot, training)("Cleanup MTD %s::%s", name()->as_klass_external_name(), signature()->as_utf8()); - if (_final_profile != nullptr && _final_profile->method() != _holder) { - log_warning(aot, training)("Stale MDO for %s::%s", name()->as_klass_external_name(), signature()->as_utf8()); - } _final_profile = nullptr; _final_counters = nullptr; _holder = nullptr; @@ -593,6 +614,7 @@ void MethodTrainingData::cleanup(Visitor& visitor) { } void KlassTrainingData::verify() { + guarantee(!has_holder() || !is_excluded(holder()), "Bad holder"); for (int i = 0; i < comp_dep_count(); i++) { CompileTrainingData* ctd = comp_dep(i); if (!ctd->_init_deps.contains(this)) { @@ -604,6 +626,7 @@ void KlassTrainingData::verify() { } void MethodTrainingData::verify(bool verify_dep_counter) { + guarantee(!has_holder() || !is_excluded(holder()->method_holder()), "Bad holder"); iterate_compiles([&](CompileTrainingData* ctd) { ctd->verify(verify_dep_counter); }); diff --git a/src/hotspot/share/oops/trainingData.hpp b/src/hotspot/share/oops/trainingData.hpp index c549004e76e..a6decdce7f0 100644 --- a/src/hotspot/share/oops/trainingData.hpp +++ b/src/hotspot/share/oops/trainingData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -217,11 +217,7 @@ public: return *prior; } template - void iterate(const Function& fn) const { // lambda enabled API - iterate(const_cast(fn)); - } - template - void iterate(Function& fn) const { // lambda enabled API + void iterate(Function fn) const { // lambda enabled API return _table.iterate_all([&](const TrainingData::Key* k, TrainingData* td) { fn(td); }); } int size() const { return _table.number_of_entries(); } @@ -304,13 +300,10 @@ private: } template - static void iterate(const Function& fn) { iterate(const_cast(fn)); } - - template - static void iterate(Function& fn) { // lambda enabled API + static void iterate(Function fn) { // lambda enabled API TrainingDataLocker l; if (have_data()) { - archived_training_data_dictionary()->iterate(fn); + archived_training_data_dictionary()->iterate_all(fn); } if (need_data()) { training_data_set()->iterate(fn); @@ -431,6 +424,8 @@ private: } return nullptr; } + + static void cleanup_after_redefinition(); }; // Training data that is associated with an InstanceKlass diff --git a/src/hotspot/share/oops/typeArrayKlass.cpp b/src/hotspot/share/oops/typeArrayKlass.cpp index bdf37c7db49..7dbea9ce475 100644 --- a/src/hotspot/share/oops/typeArrayKlass.cpp +++ b/src/hotspot/share/oops/typeArrayKlass.cpp @@ -78,7 +78,7 @@ u2 TypeArrayKlass::compute_modifier_flags() const { return JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC; } -TypeArrayKlass::TypeArrayKlass(BasicType type, Symbol* name) : ArrayKlass(name, Kind) { +TypeArrayKlass::TypeArrayKlass(BasicType type, Symbol* name) : ArrayKlass(1, name, Kind) { set_layout_helper(array_layout_helper(type)); assert(is_array_klass(), "sanity"); assert(is_typeArray_klass(), "sanity"); diff --git a/src/hotspot/share/opto/addnode.cpp b/src/hotspot/share/opto/addnode.cpp index e04da430ef0..5520e4ae977 100644 --- a/src/hotspot/share/opto/addnode.cpp +++ b/src/hotspot/share/opto/addnode.cpp @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -816,7 +816,7 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) { offset = phase->MakeConX(t2->get_con() + t12->get_con()); } else { // Else move the constant to the right. ((A+con)+B) into ((A+B)+con) - address = phase->transform(new AddPNode(in(Base),addp->in(Address),in(Offset))); + address = phase->transform(AddPNode::make_with_base(in(Base), addp->in(Address), in(Offset))); offset = addp->in(Offset); } set_req_X(Address, address, phase); @@ -838,11 +838,11 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Convert: (ptr + (offset+con)) into (ptr+offset)+con. // The idea is to merge array_base+scaled_index groups together, // and only have different constant offsets from the same base. - const Node *add = in(Offset); - if( add->Opcode() == Op_AddX && add->in(1) != add ) { - const Type *t22 = phase->type( add->in(2) ); - if( t22->singleton() && (t22 != Type::TOP) ) { // Right input is an add of a constant? - set_req(Address, phase->transform(new AddPNode(in(Base),in(Address),add->in(1)))); + const Node* add = in(Offset); + if (add->Opcode() == Op_AddX && add->in(1) != add) { + const Type* t22 = phase->type(add->in(2)); + if (t22->singleton() && (t22 != Type::TOP)) { // Right input is an add of a constant? 
+ set_req(Address, phase->transform(AddPNode::make_with_base(in(Base), in(Address), add->in(1)))); set_req_X(Offset, add->in(2), phase); // puts add on igvn worklist if needed return this; // Made progress } diff --git a/src/hotspot/share/opto/addnode.hpp b/src/hotspot/share/opto/addnode.hpp index 6128de00efb..793eff8dd5d 100644 --- a/src/hotspot/share/opto/addnode.hpp +++ b/src/hotspot/share/opto/addnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -221,17 +221,19 @@ public: // So not really an AddNode. Lives here, because people associate it with // an add. class AddPNode : public Node { +private: + AddPNode(Node* base, Node* ptr, Node* off) : Node(nullptr, base, ptr, off) { + init_class_id(Class_AddP); + assert((ptr->bottom_type() == Type::TOP) || + ((base == Compile::current()->top()) == (ptr->bottom_type()->make_ptr()->isa_oopptr() == nullptr)), + "base input only needed for heap addresses"); + } + public: enum { Control, // When is it safe to do this add? 
Base, // Base oop, for GC purposes Address, // Actually address, derived from base Offset } ; // Offset added to address - AddPNode(Node *base, Node *ptr, Node *off) : Node(nullptr,base,ptr,off) { - init_class_id(Class_AddP); - assert((ptr->bottom_type() == Type::TOP) || - ((base == Compile::current()->top()) == (ptr->bottom_type()->make_ptr()->isa_oopptr() == nullptr)), - "base input only needed for heap addresses"); - } virtual int Opcode() const; virtual Node* Identity(PhaseGVN* phase); virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); @@ -243,6 +245,18 @@ public: // second return value: intptr_t& offset); + static AddPNode* make_with_base(Node* base, Node* ptr, Node* offset) { + return new AddPNode(base, ptr, offset); + } + + static AddPNode* make_with_base(Node* base, Node* offset) { + return make_with_base(base, base, offset); + } + + static AddPNode* make_off_heap(Node* ptr, Node* offset) { + return make_with_base(Compile::current()->top(), ptr, offset); + } + // Collect the AddP offset values into the elements array, giving up // if there are more than length. int unpack_offsets(Node* elements[], int length) const; diff --git a/src/hotspot/share/opto/arraycopynode.cpp b/src/hotspot/share/opto/arraycopynode.cpp index 4ee6107fe54..2f64482f55b 100644 --- a/src/hotspot/share/opto/arraycopynode.cpp +++ b/src/hotspot/share/opto/arraycopynode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -226,8 +226,8 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c ciField* field = ik->nonstatic_field_at(i); const TypePtr* adr_type = phase->C->alias_type(field)->adr_type(); Node* off = phase->MakeConX(field->offset_in_bytes()); - Node* next_src = phase->transform(new AddPNode(base_src,base_src,off)); - Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off)); + Node* next_src = phase->transform(AddPNode::make_with_base(base_src, off)); + Node* next_dest = phase->transform(AddPNode::make_with_base(base_dest, off)); assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_src)->isa_ptr()), "slice of address and input slice don't match"); assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_dest)->isa_ptr()), @@ -258,6 +258,19 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c return mem; } +// We may have narrowed the type of base because this runs with PhaseIterGVN::_delay_transform true, explicitly +// update the type of the AddP so it's consistent with its base and load() picks the right memory slice. 
+Node* ArrayCopyNode::make_and_transform_addp(PhaseGVN* phase, Node* base, Node* offset) { + return make_and_transform_addp(phase, base, base, offset); +} + +Node* ArrayCopyNode::make_and_transform_addp(PhaseGVN* phase, Node* base, Node* ptr, Node* offset) { + assert(phase->is_IterGVN() == nullptr || phase->is_IterGVN()->delay_transform(), "helper method when delay transform is set"); + Node* addp = phase->transform(AddPNode::make_with_base(base, ptr, offset)); + phase->set_type(addp, addp->Value(phase)); + return addp; +} + bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape, Node*& adr_src, Node*& base_src, @@ -332,12 +345,11 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape, Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift))); - adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale)); - adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale)); - - adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header))); - adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header))); + adr_src = make_and_transform_addp(phase, base_src, src_scale); + adr_dest = make_and_transform_addp(phase, base_dest, dest_scale); + adr_src = make_and_transform_addp(phase, base_src, adr_src, phase->MakeConX(header)); + adr_dest = make_and_transform_addp(phase, base_dest, adr_dest, phase->MakeConX(header)); copy_type = dest_elem; } else { assert(ary_src != nullptr, "should be a clone"); @@ -355,8 +367,8 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape, return false; } - adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset)); - adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset)); + adr_src = make_and_transform_addp(phase, base_src, src_offset); + adr_dest = make_and_transform_addp(phase, base_dest, dest_offset); // The address is offsetted to an aligned 
address where a raw copy would start. // If the clone copy is decomposed into load-stores - the address is adjusted to @@ -366,8 +378,8 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape, int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset; assert(diff >= 0, "clone should not start after 1st array element"); if (diff > 0) { - adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff))); - adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff))); + adr_src = make_and_transform_addp(phase, base_src, adr_src, phase->MakeConX(diff)); + adr_dest = make_and_transform_addp(phase, base_dest, adr_dest, phase->MakeConX(diff)); } copy_type = elem; value_type = ary_src->elem(); @@ -383,6 +395,10 @@ const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* a return atp->add_offset(Type::OffsetBot); } +const TypePtr* ArrayCopyNode::get_src_adr_type(PhaseGVN* phase) const { + return get_address_type(phase, _src_type, in(Src)); +} + void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) { Node* ctl = in(TypeFunc::Control); if (!disjoint_bases && count > 1) { @@ -425,8 +441,10 @@ Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase, store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type); for (int i = 1; i < count; i++) { Node* off = phase->MakeConX(type2aelembytes(copy_type) * i); - Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off)); - Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off)); + Node* next_src = make_and_transform_addp(phase, base_src,adr_src,off); + Node* next_dest = make_and_transform_addp(phase, base_dest,adr_dest,off); + // Same as above + phase->set_type(next_dest, next_dest->Value(phase)); v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type); store(bs, phase, forward_ctl, 
mm, next_dest, atp_dest, v, value_type, copy_type); } @@ -463,8 +481,8 @@ Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase, if (count > 0) { for (int i = count-1; i >= 1; i--) { Node* off = phase->MakeConX(type2aelembytes(copy_type) * i); - Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off)); - Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off)); + Node* next_src = make_and_transform_addp(phase, base_src,adr_src,off); + Node* next_dest = make_and_transform_addp(phase, base_dest,adr_dest,off); Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type); store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type); } @@ -592,25 +610,42 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { const Type* value_type = nullptr; bool disjoint_bases = false; + Node* src = in(ArrayCopyNode::Src); + Node* dest = in(ArrayCopyNode::Dest); + // EA may have moved an input to a new slice. EA stores the new address types in the ArrayCopy node itself + // (_src_type/_dest_type). phase->type(src) and _src_type or phase->type(dest) and _dest_type may be different + // when this transformation runs if igvn hasn't had a chance to propagate the new types yet. Make sure the new + // types are taken into account so new Load/Store nodes are created on the right slice. + const TypePtr* atp_src = get_address_type(phase, _src_type, src); + const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest); + phase->set_type(src, phase->type(src)->join_speculative(atp_src)); + phase->set_type(dest, phase->type(dest)->join_speculative(atp_dest)); + + // Control flow is going to be created, it's easier to do with _delay_transform set to true. + + // prepare_array_copy() doesn't build control flow, but it creates AddP nodes. The src/dest type possibly gets + // narrowed above. If a newly created AddP node is commoned with a pre-existing one, then the type narrowing is lost. 
+ // Setting _delay_transform before prepare_array_copy() guarantees this doesn't happen. + if (can_reshape) { + assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms"); + phase->is_IterGVN()->set_delay_transform(true); + } + if (!prepare_array_copy(phase, can_reshape, adr_src, base_src, adr_dest, base_dest, copy_type, value_type, disjoint_bases)) { assert(adr_src == nullptr, "no node can be left behind"); assert(adr_dest == nullptr, "no node can be left behind"); + if (can_reshape) { + assert(phase->is_IterGVN()->delay_transform(), "cannot delay transforms"); + phase->is_IterGVN()->set_delay_transform(false); + } + return nullptr; } - Node* src = in(ArrayCopyNode::Src); - Node* dest = in(ArrayCopyNode::Dest); - const TypePtr* atp_src = get_address_type(phase, _src_type, src); - const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest); Node* in_mem = in(TypeFunc::Memory); - if (can_reshape) { - assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms"); - phase->is_IterGVN()->set_delay_transform(true); - } - Node* backward_ctl = phase->C->top(); Node* forward_ctl = phase->C->top(); array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl); @@ -670,7 +705,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { return mem; } -bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { +bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { Node* dest = in(ArrayCopyNode::Dest); if (dest->is_top()) { return false; diff --git a/src/hotspot/share/opto/arraycopynode.hpp b/src/hotspot/share/opto/arraycopynode.hpp index 83c085fd5db..aa62ee05cd0 100644 --- a/src/hotspot/share/opto/arraycopynode.hpp +++ b/src/hotspot/share/opto/arraycopynode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,6 +95,7 @@ public: _arraycopy_type_Type = TypeFunc::make(domain, range); } + const TypePtr* get_src_adr_type(PhaseGVN* phase) const; private: ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard); @@ -103,6 +104,10 @@ private: static const TypePtr* get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n); Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count); + + Node* make_and_transform_addp(PhaseGVN* phase, Node* base, Node* offset); + Node* make_and_transform_addp(PhaseGVN* phase, Node* base, Node* ptr, Node* offset); + bool prepare_array_copy(PhaseGVN *phase, bool can_reshape, Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest, BasicType& copy_type, const Type*& value_type, bool& disjoint_bases); @@ -183,7 +188,7 @@ public: virtual bool guaranteed_safepoint() { return false; } virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); - virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase); + virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const; bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; } diff --git a/src/hotspot/share/opto/block.cpp b/src/hotspot/share/opto/block.cpp index 7d3d4ec16f4..a93e2e43a29 100644 --- a/src/hotspot/share/opto/block.cpp +++ b/src/hotspot/share/opto/block.cpp @@ -179,9 +179,11 @@ int Block::is_Empty() const { // Ideal nodes (except BoxLock) are allowable in empty blocks: skip them. Only // Mach and BoxLock nodes turn directly into code via emit(). + // Keep ReachabilityFence for diagnostic purposes. 
while ((end_idx > 0) && !get_node(end_idx)->is_Mach() && - !get_node(end_idx)->is_BoxLock()) { + !get_node(end_idx)->is_BoxLock() && + !get_node(end_idx)->is_ReachabilityFence()) { end_idx--; } diff --git a/src/hotspot/share/opto/buildOopMap.cpp b/src/hotspot/share/opto/buildOopMap.cpp index 675113163e8..e3a94f78d9c 100644 --- a/src/hotspot/share/opto/buildOopMap.cpp +++ b/src/hotspot/share/opto/buildOopMap.cpp @@ -377,6 +377,9 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i worklist.push(u); } } + if (m->is_SpillCopy()) { + worklist.push(m->in(1)); + } } } #endif diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp index 1662f808286..dacc8ce9c26 100644 --- a/src/hotspot/share/opto/c2_globals.hpp +++ b/src/hotspot/share/opto/c2_globals.hpp @@ -76,6 +76,17 @@ develop(bool, StressBailout, false, \ "Perform bailouts randomly at C2 failing() checks") \ \ + product(bool, OptimizeReachabilityFences, true, DIAGNOSTIC, \ + "Optimize reachability fences " \ + "(leave reachability fence nodes intact when turned off)") \ + \ + product(bool, PreserveReachabilityFencesOnConstants, false, DIAGNOSTIC, \ + "Keep reachability fences on compile-time constants") \ + \ + product(bool, StressReachabilityFences, false, DIAGNOSTIC, \ + "Aggressively insert reachability fences " \ + "for all oop method arguments") \ + \ develop(uint, StressBailoutMean, 100000, \ "The expected number of failing() checks made until " \ "a random bailout.") \ @@ -249,6 +260,9 @@ develop(bool, TraceLoopOpts, false, \ "Trace executed loop optimizations") \ \ + develop(bool, TraceSplitIf, false, \ + "Trace Split-If optimization") \ + \ develop(bool, TraceLoopLimitCheck, false, \ "Trace generation of loop limits checks") \ \ @@ -334,6 +348,15 @@ product(bool, PartialPeelLoop, true, \ "Partial peel (rotate) loops") \ \ + product(uint, LoopPeeling, 1, DIAGNOSTIC, \ + "Control loop peeling optimization: " \ + "0 = always disable loop peeling, 
" \ + "1 = enable loop peeling (default), " \ + "2 = disable loop peeling as a standalone optimization but " \ + "allow it as a helper to other loop optimizations like removing " \ + "empty loops") \ + range(0, 2) \ + \ product(intx, PartialPeelNewPhiDelta, 0, \ "Additional phis that can be created by partial peeling") \ range(0, max_jint) \ @@ -693,6 +716,10 @@ develop(bool, TraceIterativeGVN, false, \ "Print progress during Iterative Global Value Numbering") \ \ + develop(bool, UseDeepIGVNRevisit, true, \ + "Re-process nodes that could benefit from a deep revisit after " \ + "the IGVN worklist drains") \ + \ develop(uint, VerifyIterativeGVN, 0, \ "Verify Iterative Global Value Numbering =FEDCBA, with:" \ " F: verify Node::Ideal does not return nullptr if the node" \ @@ -895,6 +922,47 @@ \ develop(bool, StressLoopPeeling, false, \ "Randomize loop peeling decision") \ + \ + develop(bool, StressCountedLoop, false, \ + "Randomly delay conversion to counted loops") \ + \ + product(bool, HotCodeHeap, false, EXPERIMENTAL, \ + "Enable the code heap for hot C2 nmethods") \ + \ + product(double, HotCodeSamplePercent, 80, EXPERIMENTAL, \ + "Minimum percentage of profiling samples that must be in " \ + "the MethodHot heap before stopping hot code collection") \ + range(0, 100) \ + \ + product(double, HotCodeStablePercent, 5, EXPERIMENTAL, \ + "Maximum percentage of newly compiled to total C2 nmethods " \ + "to treat nmethod count as stable. 
" \ + "Values less than zero disable the stable check") \ + range(-1, DBL_MAX) \ + \ + product(uint, HotCodeIntervalSeconds, 300, EXPERIMENTAL, \ + "Seconds between hot code grouping attempts") \ + range(0, max_juint) \ + \ + product(uint, HotCodeSampleSeconds, 120, EXPERIMENTAL, \ + "Seconds to sample application threads per grouping attempt") \ + range(0, max_juint) \ + \ + product(uint, HotCodeStartupDelaySeconds, 120, EXPERIMENTAL, \ + "Seconds to delay before starting hot code grouping thread") \ + range(0, max_juint) \ + \ + product(uint, HotCodeMinSamplingMs, 5, EXPERIMENTAL, \ + "Minimum sampling interval in milliseconds") \ + range(0, max_juint) \ + \ + product(uint, HotCodeMaxSamplingMs, 15, EXPERIMENTAL, \ + "Maximum sampling interval in milliseconds") \ + range(0, max_juint) \ + \ + product(uint, HotCodeCallLevel, 1, EXPERIMENTAL, \ + "Number of levels of callees to relocate per candidate") \ + range(0, max_juint) \ // end of C2_FLAGS diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp index ead1b78cdea..5d170f919c8 100644 --- a/src/hotspot/share/opto/c2compiler.cpp +++ b/src/hotspot/share/opto/c2compiler.cpp @@ -775,6 +775,7 @@ bool C2Compiler::is_intrinsic_supported(vmIntrinsics::ID id) { case vmIntrinsics::_longBitsToDouble: case vmIntrinsics::_Reference_get0: case vmIntrinsics::_Reference_refersTo0: + case vmIntrinsics::_Reference_reachabilityFence: case vmIntrinsics::_PhantomReference_refersTo0: case vmIntrinsics::_Reference_clear0: case vmIntrinsics::_PhantomReference_clear0: diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp index 1465da02ac8..49897ca3c17 100644 --- a/src/hotspot/share/opto/callGenerator.cpp +++ b/src/hotspot/share/opto/callGenerator.cpp @@ -611,6 +611,20 @@ void CallGenerator::do_late_inline_helper() { } Compile* C = Compile::current(); + + uint endoff = call->jvms()->endoff(); + if (C->inlining_incrementally()) { + // No reachability edges 
should be present when incremental inlining takes place. + // Inlining logic doesn't expect any extra edges past debug info and fails with + // an assert in SafePointNode::grow_stack. + assert(endoff == call->req(), "reachability edges not supported"); + } else { + if (call->req() > endoff) { // reachability edges present + assert(OptimizeReachabilityFences, "required"); + return; // keep the original call node as the holder of reachability info + } + } + // Remove inlined methods from Compiler's lists. if (call->is_macro()) { C->remove_macro_node(call); diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp index 9b3d7b38d15..eb4f506d14f 100644 --- a/src/hotspot/share/opto/callnode.cpp +++ b/src/hotspot/share/opto/callnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,7 @@ #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" #include "utilities/powerOfTwo.hpp" // Portions of code courtesy of Clifford Click @@ -826,7 +827,7 @@ uint CallNode::match_edge(uint idx) const { // Determine whether the call could modify the field of the specified // instance at the specified offset. 
// -bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { +bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { assert((t_oop != nullptr), "sanity"); if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) { const TypeTuple* args = _tf->domain(); @@ -844,6 +845,10 @@ bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { } } guarantee(dest != nullptr, "Call had only one ptr in, broken IR!"); + if (phase->type(dest)->isa_rawptr()) { + // may happen for an arraycopy that initializes a newly allocated object. Conservatively return true; + return true; + } if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) { return true; } @@ -893,7 +898,7 @@ bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { } // Does this call have a direct reference to n other than debug information? -bool CallNode::has_non_debug_use(Node *n) { +bool CallNode::has_non_debug_use(const Node *n) { const TypeTuple * d = tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { Node *arg = in(i); @@ -935,7 +940,7 @@ Node *CallNode::result_cast() { } -void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) const { +void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts, bool allow_handlers) const { projs->fallthrough_proj = nullptr; projs->fallthrough_catchproj = nullptr; projs->fallthrough_ioproj = nullptr; @@ -956,14 +961,13 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj projs->fallthrough_proj = pn; const Node* cn = pn->unique_ctrl_out_or_null(); if (cn != nullptr && cn->is_Catch()) { - ProjNode *cpn = nullptr; for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { - cpn = cn->fast_out(k)->as_Proj(); - assert(cpn->is_CatchProj(), "must be a CatchProjNode"); - if (cpn->_con == CatchProjNode::fall_through_index) 
+ CatchProjNode* cpn = cn->fast_out(k)->as_CatchProj(); + assert(allow_handlers || !cpn->is_handler_proj(), "not allowed"); + if (cpn->_con == CatchProjNode::fall_through_index) { + assert(cpn->handler_bci() == CatchProjNode::no_handler_bci, ""); projs->fallthrough_catchproj = cpn; - else { - assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index."); + } else if (!cpn->is_handler_proj()) { projs->catchall_catchproj = cpn; } } @@ -971,15 +975,20 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj break; } case TypeFunc::I_O: - if (pn->_is_io_use) + if (pn->_is_io_use) { projs->catchall_ioproj = pn; - else + } else { projs->fallthrough_ioproj = pn; + } for (DUIterator j = pn->outs(); pn->has_out(j); j++) { Node* e = pn->out(j); - if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) { - assert(projs->exobj == nullptr, "only one"); - projs->exobj = e; + if (e->Opcode() == Op_CreateEx && e->outcnt() > 0) { + CatchProjNode* ecpn = e->in(0)->isa_CatchProj(); + assert(allow_handlers || ecpn == nullptr || !ecpn->is_handler_proj(), "not allowed"); + if (ecpn != nullptr && ecpn->_con != CatchProjNode::fall_through_index && !ecpn->is_handler_proj()) { + assert(projs->exobj == nullptr, "only one"); + projs->exobj = e; + } } } break; @@ -1367,6 +1376,25 @@ TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(con return tuple; } +CallLeafPureNode* CallLeafPureNode::inline_call_leaf_pure_node(Node* control) const { + Node* top = Compile::current()->top(); + if (control == nullptr) { + control = in(TypeFunc::Control); + } + + CallLeafPureNode* call = new CallLeafPureNode(tf(), entry_point(), _name); + call->init_req(TypeFunc::Control, control); + call->init_req(TypeFunc::I_O, top); + call->init_req(TypeFunc::Memory, top); + call->init_req(TypeFunc::ReturnAdr, top); + call->init_req(TypeFunc::FramePtr, top); + for (unsigned int i = 0; i < tf()->domain()->cnt() - TypeFunc::Parms; 
i++) { + call->init_req(TypeFunc::Parms + i, in(TypeFunc::Parms + i)); + } + + return call; +} + Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (is_dead()) { return nullptr; @@ -1585,6 +1613,33 @@ void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) { } } +void SafePointNode::remove_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) { + assert(non_debug_edges._state == NodeEdgeTempStorage::state_initial, "not processed"); + assert(non_debug_edges.is_empty(), "edges not processed"); + + while (req() > jvms()->endoff()) { + uint last = req() - 1; + non_debug_edges.push(in(last)); + del_req(last); + } + + assert(jvms()->endoff() == req(), "no extra edges past debug info allowed"); + DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_populated); +} + +void SafePointNode::restore_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) { + assert(non_debug_edges._state == NodeEdgeTempStorage::state_populated, "not populated"); + assert(jvms()->endoff() == req(), "no extra edges past debug info allowed"); + + while (!non_debug_edges.is_empty()) { + Node* non_debug_edge = non_debug_edges.pop(); + add_req(non_debug_edge); + } + + assert(non_debug_edges.is_empty(), "edges not processed"); + DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_processed); +} + //============== SafePointScalarObjectNode ============== SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) : @@ -1741,8 +1796,8 @@ Node *AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) { Node* mark_node = nullptr; if (UseCompactObjectHeaders) { Node* klass_node = in(AllocateNode::KlassNode); - Node* proto_adr = phase->transform(new AddPNode(phase->C->top(), klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); - mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), 
MemNode::unordered); + Node* proto_adr = phase->transform(AddPNode::make_off_heap(klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); + mark_node = LoadNode::make(*phase, control, mem, proto_adr, phase->type(proto_adr)->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered); } else { // For now only enable fast locking for non-array types mark_node = phase->MakeConX(markWord::prototype().value()); @@ -2389,7 +2444,7 @@ void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* } } -bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) { +bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const { if (dest_t->is_known_instance() && t_oop->is_known_instance()) { return dest_t->instance_id() == t_oop->instance_id(); } @@ -2433,3 +2488,157 @@ bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeO return true; } + +PowDNode::PowDNode(Compile* C, Node* base, Node* exp) + : CallLeafPureNode( + OptoRuntime::Math_DD_D_Type(), + StubRoutines::dpow() != nullptr ? 
StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow), + "pow") { + add_flag(Flag_is_macro); + C->add_macro_node(this); + + init_req(TypeFunc::Parms + 0, base); + init_req(TypeFunc::Parms + 1, C->top()); // double slot padding + init_req(TypeFunc::Parms + 2, exp); + init_req(TypeFunc::Parms + 3, C->top()); // double slot padding +} + +const Type* PowDNode::Value(PhaseGVN* phase) const { + const Type* t_base = phase->type(base()); + const Type* t_exp = phase->type(exp()); + + if (t_base == Type::TOP || t_exp == Type::TOP) { + return Type::TOP; + } + + const TypeD* base_con = t_base->isa_double_constant(); + const TypeD* exp_con = t_exp->isa_double_constant(); + const TypeD* result_t = nullptr; + + // constant folding: both inputs are constants + if (base_con != nullptr && exp_con != nullptr) { + result_t = TypeD::make(SharedRuntime::dpow(base_con->getd(), exp_con->getd())); + } + + // Special cases when only the exponent is known: + if (exp_con != nullptr) { + double e = exp_con->getd(); + + // If the second argument is positive or negative zero, then the result is 1.0. + // i.e., pow(x, +/-0.0D) => 1.0 + if (e == 0.0) { // true for both -0.0 and +0.0 + result_t = TypeD::ONE; + } + + // If the second argument is NaN, then the result is NaN. + // i.e., pow(x, NaN) => NaN + if (g_isnan(e)) { + result_t = TypeD::make(NAN); + } + } + + if (result_t != nullptr) { + // We can't simply return a TypeD here, it must be a tuple type to be compatible with call nodes. 
+ const Type** fields = TypeTuple::fields(2); + fields[TypeFunc::Parms + 0] = result_t; + fields[TypeFunc::Parms + 1] = Type::HALF; + return TypeTuple::make(TypeFunc::Parms + 2, fields); + } + + return tf()->range(); +} + +Node* PowDNode::Ideal(PhaseGVN* phase, bool can_reshape) { + if (!can_reshape) { + return nullptr; // wait for igvn + } + + PhaseIterGVN* igvn = phase->is_IterGVN(); + Node* base = this->base(); + Node* exp = this->exp(); + + const Type* t_exp = phase->type(exp); + const TypeD* exp_con = t_exp->isa_double_constant(); + + // Special cases when only the exponent is known: + if (exp_con != nullptr) { + double e = exp_con->getd(); + + // If the second argument is 1.0, then the result is the same as the first argument. + // i.e., pow(x, 1.0) => x + if (e == 1.0) { + return make_tuple_of_input_state_and_result(igvn, base); + } + + // If the second argument is 2.0, then strength reduce to multiplications. + // i.e., pow(x, 2.0) => x * x + if (e == 2.0) { + Node* mul = igvn->transform(new MulDNode(base, base)); + return make_tuple_of_input_state_and_result(igvn, mul); + } + + // If the second argument is 0.5, then strength reduce to square roots. + // i.e., pow(x, 0.5) => sqrt(x) iff x > 0 + if (e == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) { + Node* ctrl = in(TypeFunc::Control); + Node* zero = igvn->zerocon(T_DOUBLE); + + // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0. + // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0). + // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0.
+ Node* cmp = igvn->register_new_node_with_optimizer(new CmpDNode(base, zero)); + Node* test = igvn->register_new_node_with_optimizer(new BoolNode(cmp, BoolTest::le)); + + IfNode* iff = new IfNode(ctrl, test, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); + igvn->register_new_node_with_optimizer(iff); + Node* if_slow = igvn->register_new_node_with_optimizer(new IfTrueNode(iff)); // x <= 0 + Node* if_fast = igvn->register_new_node_with_optimizer(new IfFalseNode(iff)); // x > 0 + + // slow path: call pow(x, 0.5) + Node* call = igvn->register_new_node_with_optimizer(inline_call_leaf_pure_node(if_slow)); + Node* call_ctrl = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Control)); + Node* call_result = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Parms + 0)); + + // fast path: sqrt(x) + Node* sqrt = igvn->register_new_node_with_optimizer(new SqrtDNode(igvn->C, if_fast, base)); + + // merge paths + RegionNode* region = new RegionNode(3); + igvn->register_new_node_with_optimizer(region); + region->init_req(1, call_ctrl); // slow path + region->init_req(2, if_fast); // fast path + + PhiNode* phi = new PhiNode(region, Type::DOUBLE); + igvn->register_new_node_with_optimizer(phi); + phi->init_req(1, call_result); // slow: pow() result + phi->init_req(2, sqrt); // fast: sqrt() result + + igvn->C->set_has_split_ifs(true); // Has chance for split-if optimization + + return make_tuple_of_input_state_and_result(igvn, phi, region); + } + } + + return CallLeafPureNode::Ideal(phase, can_reshape); +} + +// We can't simply have Ideal() returning a Con or MulNode since the users are still expecting a Call node, but we could +// produce a tuple that follows the same pattern so users can still get control, io, memory, etc.. 
+TupleNode* PowDNode::make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control) { + if (control == nullptr) { + control = in(TypeFunc::Control); + } + + Compile* C = phase->C; + C->remove_macro_node(this); + TupleNode* tuple = TupleNode::make( + tf()->range(), + control, + in(TypeFunc::I_O), + in(TypeFunc::Memory), + in(TypeFunc::FramePtr), + in(TypeFunc::ReturnAdr), + result, + C->top()); + return tuple; +} diff --git a/src/hotspot/share/opto/callnode.hpp b/src/hotspot/share/opto/callnode.hpp index 95d1fc27d45..e4c548fc744 100644 --- a/src/hotspot/share/opto/callnode.hpp +++ b/src/hotspot/share/opto/callnode.hpp @@ -503,6 +503,66 @@ public: return _has_ea_local_in_scope; } + // A temporary storage for node edges. + // Intended for a single use. + class NodeEdgeTempStorage : public StackObj { + friend class SafePointNode; + + PhaseIterGVN& _igvn; + Node* _node_hook; + +#ifdef ASSERT + enum State { state_initial, state_populated, state_processed }; + + State _state; // monotonically transitions from initial to processed state.
+#endif // ASSERT + + bool is_empty() const { + return _node_hook == nullptr || _node_hook->req() == 1; + } + void push(Node* n) { + assert(n != nullptr, ""); + if (_node_hook == nullptr) { + _node_hook = new Node(nullptr); + } + _node_hook->add_req(n); + } + Node* pop() { + assert(!is_empty(), ""); + int idx = _node_hook->req()-1; + Node* r = _node_hook->in(idx); + _node_hook->del_req(idx); + assert(r != nullptr, ""); + return r; + } + + public: + NodeEdgeTempStorage(PhaseIterGVN &igvn) : _igvn(igvn), _node_hook(nullptr) + DEBUG_ONLY(COMMA _state(state_initial)) { + assert(is_empty(), ""); + } + + ~NodeEdgeTempStorage() { + assert(_state == state_processed, "not processed"); + assert(is_empty(), ""); + if (_node_hook != nullptr) { + _node_hook->destruct(&_igvn); + } + } + + void remove_edge_if_present(Node* n) { + if (!is_empty()) { + int idx = _node_hook->find_edge(n); + if (idx > 0) { + _node_hook->del_req(idx); + } + } + } + }; + + void remove_non_debug_edges(NodeEdgeTempStorage& non_debug_edges); + void restore_non_debug_edges(NodeEdgeTempStorage& non_debug_edges); + void disconnect_from_root(PhaseIterGVN *igvn); // Standard Node stuff @@ -685,7 +745,7 @@ class CallGenerator; class CallNode : public SafePointNode { protected: - bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase); + bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const; public: const TypeFunc* _tf; // Function type @@ -734,9 +794,9 @@ public: virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); } // Returns true if the call may modify n - virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase); + virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const; // Does this node have a use of n other than in debug information? 
- bool has_non_debug_use(Node* n); + bool has_non_debug_use(const Node* n); // Returns the unique CheckCastPP of a call // or result projection is there are several CheckCastPP // or returns null if there is no one. @@ -751,7 +811,10 @@ public: // Collect all the interesting edges from a call for use in // replacing the call by something else. Used by macro expansion // and the late inlining support. - void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const; + void extract_projections(CallProjections* projs, + bool separate_io_proj, + bool do_asserts = true, + bool allow_handlers = false) const; virtual uint match_edge(uint idx) const; @@ -948,6 +1011,8 @@ public: } int Opcode() const override; Node* Ideal(PhaseGVN* phase, bool can_reshape) override; + + CallLeafPureNode* inline_call_leaf_pure_node(Node* control = nullptr) const; }; //------------------------------CallLeafNoFPNode------------------------------- @@ -1042,7 +1107,7 @@ public: virtual bool guaranteed_safepoint() { return false; } // allocations do not modify their arguments - virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;} + virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { return false; } // Pattern-match a possible usage of AllocateNode. // Return null if no allocation is recognized. 
@@ -1206,7 +1271,7 @@ public: bool is_balanced(); // locking does not modify its arguments - virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase){ return false; } + virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { return false; } #ifndef PRODUCT void create_lock_counter(JVMState* s); @@ -1299,4 +1364,19 @@ public: JVMState* dbg_jvms() const { return nullptr; } #endif }; + +//------------------------------PowDNode-------------------------------------- +class PowDNode : public CallLeafPureNode { + TupleNode* make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control = nullptr); + +public: + PowDNode(Compile* C, Node* base, Node* exp); + int Opcode() const override; + const Type* Value(PhaseGVN* phase) const override; + Node* Ideal(PhaseGVN* phase, bool can_reshape) override; + + Node* base() const { return in(TypeFunc::Parms + 0); } + Node* exp() const { return in(TypeFunc::Parms + 2); } +}; + #endif // SHARE_OPTO_CALLNODE_HPP diff --git a/src/hotspot/share/opto/castnode.cpp b/src/hotspot/share/opto/castnode.cpp index 4e3750b5ee5..7bb6b1dcb77 100644 --- a/src/hotspot/share/opto/castnode.cpp +++ b/src/hotspot/share/opto/castnode.cpp @@ -413,6 +413,43 @@ Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) { return nullptr; } +// CastPPNodes are removed before matching, while alias classes are needed in global code motion. +// As a result, it is not valid for a CastPPNode to change the oop such that the derived pointers +// lie in different alias classes with and without the node. For example, a CastPPNode c may not +// cast an Object to a Bottom[], because later removal of c would affect the alias class of c's +// array length field (c + arrayOopDesc::length_offset_in_bytes()). +// +// This function verifies that a CastPPNode on an oop does not violate the aforementioned property. 
+// +// TODO 8382147: Currently, this verification only applies during the construction of a CastPPNode, +// we may want to apply the same verification during IGVN transformations, as well as final graph +// reshaping. +void CastPPNode::verify_type(const Type* in_type, const Type* out_type) { +#ifdef ASSERT + out_type = out_type->join(in_type); + if (in_type->empty() || out_type->empty()) { + return; + } + if (in_type == TypePtr::NULL_PTR || out_type == TypePtr::NULL_PTR) { + return; + } + if (!in_type->isa_oopptr() && !out_type->isa_oopptr()) { + return; + } + + assert(in_type->isa_oopptr() && out_type->isa_oopptr(), "must be both oops or both non-oops"); + if (in_type->isa_aryptr() && out_type->isa_aryptr()) { + const Type* e1 = in_type->is_aryptr()->elem(); + const Type* e2 = out_type->is_aryptr()->elem(); + assert(e1->basic_type() == e2->basic_type(), "must both be arrays of the same primitive type or both be oops arrays"); + return; + } + + assert(in_type->isa_instptr() && out_type->isa_instptr(), "must be both array oops or both non-array oops"); + assert(in_type->is_instptr()->instance_klass() == out_type->is_instptr()->instance_klass(), "must not cast to a different type"); +#endif // ASSERT +} + //------------------------------Value------------------------------------------ // Take 'join' of input and cast-up type, unless working with an Interface const Type* CheckCastPPNode::Value(PhaseGVN* phase) const { @@ -440,6 +477,11 @@ const Type* CheckCastPPNode::Value(PhaseGVN* phase) const { return result; } +Node* CheckCastPPNode::pin_node_under_control_impl() const { + assert(_dependency.is_floating(), "already pinned"); + return new CheckCastPPNode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), _extra_types); +} + //============================================================================= //------------------------------Value------------------------------------------ const Type* CastX2PNode::Value(PhaseGVN* phase) const { @@ -470,9 
+512,7 @@ static inline Node* addP_of_X2P(PhaseGVN *phase, if (negate) { dispX = phase->transform(new SubXNode(phase->MakeConX(0), dispX)); } - return new AddPNode(phase->C->top(), - phase->transform(new CastX2PNode(base)), - dispX); + return AddPNode::make_off_heap(phase->transform(new CastX2PNode(base)), dispX); } Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) { diff --git a/src/hotspot/share/opto/castnode.hpp b/src/hotspot/share/opto/castnode.hpp index 38545fd6f41..dce54eb73c0 100644 --- a/src/hotspot/share/opto/castnode.hpp +++ b/src/hotspot/share/opto/castnode.hpp @@ -303,14 +303,18 @@ public: //------------------------------CastPPNode------------------------------------- // cast pointer to pointer (different type) -class CastPPNode: public ConstraintCastNode { - public: - CastPPNode (Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr) +class CastPPNode : public ConstraintCastNode { +public: + CastPPNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr) : ConstraintCastNode(ctrl, n, t, dependency, types) { init_class_id(Class_CastPP); + verify_type(n->bottom_type(), t); } virtual int Opcode() const; virtual uint ideal_reg() const { return Op_RegP; } + +private: + static void verify_type(const Type* in_type, const Type* out_type); }; //------------------------------CheckCastPPNode-------------------------------- @@ -329,6 +333,7 @@ class CheckCastPPNode: public ConstraintCastNode { private: virtual bool depends_only_on_test_impl() const { return !type()->isa_rawptr() && ConstraintCastNode::depends_only_on_test_impl(); } + virtual Node* pin_node_under_control_impl() const; }; diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp index c65bc391792..828e5bf299f 100644 --- a/src/hotspot/share/opto/cfgnode.cpp +++ 
b/src/hotspot/share/opto/cfgnode.cpp @@ -735,7 +735,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { #endif } // Remove the RegionNode itself from DefUse info - igvn->remove_dead_node(this); + igvn->remove_dead_node(this, PhaseIterGVN::NodeOrigin::Graph); return nullptr; } return this; // Record progress @@ -1007,7 +1007,7 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) { BoolNode* new_bol = new BoolNode(bol2->in(1), res); igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn))); if (new_bol->outcnt() == 0) { - igvn->remove_dead_node(new_bol); + igvn->remove_dead_node(new_bol, PhaseIterGVN::NodeOrigin::Speculative); } } return false; @@ -2480,7 +2480,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { } phase->is_IterGVN()->register_new_node_with_optimizer(offset); } - return new AddPNode(base, address, offset); + return AddPNode::make_with_base(base, address, offset); } } } @@ -2675,6 +2675,10 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { for( uint i=1; ias_Mach()->ideal_Opcode() == Op_CastPP) || (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) || - (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) || + (check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) || #endif // _LP64 check->as_Mach()->ideal_Opcode() == Op_LoadP || check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) { diff --git a/src/hotspot/share/opto/classes.cpp b/src/hotspot/share/opto/classes.cpp index b760a179b57..1cd6c52393b 100644 --- a/src/hotspot/share/opto/classes.cpp +++ b/src/hotspot/share/opto/classes.cpp @@ -43,6 +43,7 @@ #include "opto/narrowptrnode.hpp" #include "opto/node.hpp" #include "opto/opaquenode.hpp" +#include "opto/reachability.hpp" #include "opto/rootnode.hpp" #include "opto/subnode.hpp" #include "opto/subtypenode.hpp" diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp index 
abd93fdd876..0f67cf90183 100644 --- a/src/hotspot/share/opto/classes.hpp +++ b/src/hotspot/share/opto/classes.hpp @@ -239,8 +239,10 @@ macro(MemBarRelease) macro(StoreFence) macro(StoreStoreFence) macro(MemBarReleaseLock) +macro(MemBarStoreLoad) macro(MemBarVolatile) macro(MemBarStoreStore) +macro(MemBarFull) macro(MergeMem) macro(MinI) macro(MinL) @@ -284,6 +286,7 @@ macro(OpaqueZeroTripGuard) macro(OpaqueConstantBool) macro(OpaqueInitializedAssertionPredicate) macro(OpaqueTemplateAssertionPredicate) +macro(PowD) macro(ProfileBoolean) macro(OrI) macro(OrL) @@ -393,6 +396,7 @@ macro(AddVL) macro(AddReductionVL) macro(AddVF) macro(AddVHF) +macro(AddReductionVHF) macro(AddReductionVF) macro(AddVD) macro(AddReductionVD) @@ -410,6 +414,7 @@ macro(MulReductionVI) macro(MulVL) macro(MulReductionVL) macro(MulVF) +macro(MulReductionVHF) macro(MulReductionVF) macro(MulVD) macro(MulReductionVD) @@ -544,3 +549,4 @@ macro(MaskAll) macro(AndVMask) macro(OrVMask) macro(XorVMask) +macro(ReachabilityFence) diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index f1ea8231df9..e05df8ea716 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -74,6 +74,7 @@ #include "opto/output.hpp" #include "opto/parse.hpp" #include "opto/phaseX.hpp" +#include "opto/reachability.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/stringopts.hpp" @@ -396,6 +397,9 @@ void Compile::remove_useless_node(Node* dead) { if (dead->is_expensive()) { remove_expensive_node(dead); } + if (dead->is_ReachabilityFence()) { + remove_reachability_fence(dead->as_ReachabilityFence()); + } if (dead->is_OpaqueTemplateAssertionPredicate()) { remove_template_assertion_predicate_opaque(dead->as_OpaqueTemplateAssertionPredicate()); } @@ -459,6 +463,7 @@ void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_Lis // Remove useless Template Assertion Predicate opaque nodes 
remove_useless_nodes(_template_assertion_predicate_opaques, useful); remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes + remove_useless_nodes(_reachability_fences, useful); // remove useless node recorded for post loop opts IGVN pass remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass remove_useless_nodes(_for_merge_stores_igvn, useful); // remove useless node recorded for merge stores IGVN pass remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps @@ -665,6 +670,7 @@ Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci, _parse_predicates(comp_arena(), 8, 0, nullptr), _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr), _expensive_nodes(comp_arena(), 8, 0, nullptr), + _reachability_fences(comp_arena(), 8, 0, nullptr), _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr), _unstable_if_traps(comp_arena(), 8, 0, nullptr), @@ -741,7 +747,7 @@ Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci, if (StressLCM || StressGCM || StressIGVN || StressCCP || StressIncrementalInlining || StressMacroExpansion || StressMacroElimination || StressUnstableIfTraps || - StressBailout || StressLoopPeeling) { + StressBailout || StressLoopPeeling || StressCountedLoop) { initialize_stress_seed(directive); } @@ -934,6 +940,7 @@ Compile::Compile(ciEnv* ci_env, _directive(directive), _log(ci_env->log()), _first_failure_details(nullptr), + _reachability_fences(comp_arena(), 8, 0, nullptr), _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr), _congraph(nullptr), @@ -2257,7 +2264,9 @@ bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) { PhaseIdealLoop::optimize(igvn, mode); _loop_opts_cnt--; if (failing()) return false; - if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2); + if (major_progress()) { + 
print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2); + } } } return true; @@ -2275,7 +2284,7 @@ void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) { if (n != nullptr && n->is_SafePoint()) { r->rm_prec(i); if (n->outcnt() == 0) { - igvn.remove_dead_node(n); + igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph); } --i; } @@ -2319,7 +2328,7 @@ void Compile::Optimize() { #endif { TracePhase tp(_t_iterGVN); - igvn.optimize(); + igvn.optimize(true); } if (failing()) return; @@ -2383,7 +2392,7 @@ void Compile::Optimize() { PhaseRenumberLive prl(initial_gvn(), *igvn_worklist()); } igvn.reset(); - igvn.optimize(); + igvn.optimize(true); if (failing()) return; } @@ -2416,7 +2425,7 @@ void Compile::Optimize() { int mcount = macro_count(); // Record number of allocations and locks before IGVN // Optimize out fields loads from scalar replaceable allocations. - igvn.optimize(); + igvn.optimize(true); print_method(PHASE_ITER_GVN_AFTER_EA, 2); if (failing()) return; @@ -2496,7 +2505,7 @@ void Compile::Optimize() { { TracePhase tp(_t_iterGVN2); igvn.reset_from_igvn(&ccp); - igvn.optimize(); + igvn.optimize(true); } print_method(PHASE_ITER_GVN2, 2); @@ -2508,12 +2517,23 @@ void Compile::Optimize() { return; } - if (failing()) return; - C->clear_major_progress(); // ensure that major progress is now clear process_for_post_loop_opts_igvn(igvn); + if (failing()) return; + + // Once loop optimizations are over, it is safe to get rid of all reachability fence nodes and + // migrate reachability edges to safepoints. 
+ if (OptimizeReachabilityFences && _reachability_fences.length() > 0) { + TracePhase tp1(_t_idealLoop); + TracePhase tp2(_t_reachability); + PhaseIdealLoop::optimize(igvn, PostLoopOptsExpandReachabilityFences); + print_method(PHASE_EXPAND_REACHABILITY_FENCES, 2); + if (failing()) return; + assert(_reachability_fences.length() == 0 || PreserveReachabilityFencesOnConstants, "no RF nodes allowed"); + } + process_for_merge_stores_igvn(igvn); if (failing()) return; @@ -3180,10 +3200,10 @@ void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Uni !n->in(2)->is_Con() ) { // right use is not a constant // Check for commutative opcode switch( nop ) { - case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL: + case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddHF: case Op_AddL: case Op_MaxI: case Op_MaxL: case Op_MaxF: case Op_MaxD: case Op_MinI: case Op_MinL: case Op_MinF: case Op_MinD: - case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL: + case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulHF: case Op_MulL: case Op_AndL: case Op_XorL: case Op_OrL: case Op_AndI: case Op_XorI: case Op_OrI: { // Move "last use" input to left by swapping inputs @@ -3262,6 +3282,8 @@ void Compile::handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned) { void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes) { switch( nop ) { // Count all float operations that may use FPU + case Op_AddHF: + case Op_MulHF: case Op_AddF: case Op_SubF: case Op_MulF: @@ -3412,8 +3434,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f Node *addp = n->in(AddPNode::Address); assert(n->as_AddP()->address_input_has_same_base(), "Base pointers must match (addp %u)", addp->_idx ); #ifdef _LP64 - if ((UseCompressedOops || UseCompressedClassPointers) && - addp->Opcode() == Op_ConP && + if (addp->Opcode() == Op_ConP && addp == n->in(AddPNode::Base) && n->in(AddPNode::Offset)->is_Con()) { 
// If the transformation of ConP to ConN+DecodeN is beneficial depends @@ -3426,7 +3447,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f bool is_klass = t->isa_klassptr() != nullptr; if ((is_oop && UseCompressedOops && Matcher::const_oop_prefer_decode() ) || - (is_klass && UseCompressedClassPointers && Matcher::const_klass_prefer_decode() && + (is_klass && Matcher::const_klass_prefer_decode() && t->isa_klassptr()->exact_klass()->is_in_encoding_range())) { Node* nn = nullptr; @@ -3769,10 +3790,12 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f case Op_AddReductionVI: case Op_AddReductionVL: + case Op_AddReductionVHF: case Op_AddReductionVF: case Op_AddReductionVD: case Op_MulReductionVI: case Op_MulReductionVL: + case Op_MulReductionVHF: case Op_MulReductionVF: case Op_MulReductionVD: case Op_MinReductionV: @@ -3798,7 +3821,11 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f } break; case Op_Loop: - assert(!n->as_Loop()->is_loop_nest_inner_loop() || _loop_opts_cnt == 0, "should have been turned into a counted loop"); + // When StressCountedLoop is enabled, this loop may intentionally avoid a counted loop conversion. + // This is expected behavior for the stress mode, which exercises alternative compilation paths. + if (!StressCountedLoop) { + assert(!n->as_Loop()->is_loop_nest_inner_loop() || _loop_opts_cnt == 0, "should have been turned into a counted loop"); + } case Op_CountedLoop: case Op_LongCountedLoop: case Op_OuterStripMinedLoop: @@ -3968,11 +3995,28 @@ void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_R } } + expand_reachability_edges(sfpt); + // Skip next transformation if compressed oops are not used. 
- if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) || - (!UseCompressedOops && !UseCompressedClassPointers)) + if (UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) return; + // Go over ReachabilityFence nodes to skip DecodeN nodes for referents. + // The sole purpose of RF node is to keep the referent oop alive and + // decoding the oop for that is not needed. + for (int i = 0; i < C->reachability_fences_count(); i++) { + ReachabilityFenceNode* rf = C->reachability_fence(i); + DecodeNNode* dn = rf->in(1)->isa_DecodeN(); + if (dn != nullptr) { + if (!dn->has_non_debug_uses() || Matcher::narrow_oop_use_complex_address()) { + rf->set_req(1, dn->in(1)); + if (dn->outcnt() == 0) { + dn->disconnect_inputs(this); + } + } + } + } + // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges. // It could be done for an uncommon traps or any safepoints/calls // if the DecodeN/DecodeNKlass node is referenced only in a debug info. @@ -3986,21 +4030,8 @@ void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_R n->as_CallStaticJava()->uncommon_trap_request() != 0); for (int j = start; j < end; j++) { Node* in = n->in(j); - if (in->is_DecodeNarrowPtr()) { - bool safe_to_skip = true; - if (!is_uncommon ) { - // Is it safe to skip? 
- for (uint i = 0; i < in->outcnt(); i++) { - Node* u = in->raw_out(i); - if (!u->is_SafePoint() || - (u->is_Call() && u->as_Call()->has_non_debug_use(n))) { - safe_to_skip = false; - } - } - } - if (safe_to_skip) { - n->set_req(j, in->in(1)); - } + if (in->is_DecodeNarrowPtr() && (is_uncommon || !in->has_non_debug_uses())) { + n->set_req(j, in->in(1)); if (in->outcnt() == 0) { in->disconnect_inputs(this); } diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp index eb6be669f24..ff0085d79de 100644 --- a/src/hotspot/share/opto/compile.hpp +++ b/src/hotspot/share/opto/compile.hpp @@ -80,6 +80,7 @@ class PhaseIterGVN; class PhaseRegAlloc; class PhaseCCP; class PhaseOutput; +class ReachabilityFenceNode; class RootNode; class relocInfo; class StartNode; @@ -107,7 +108,8 @@ enum LoopOptsMode { LoopOptsMaxUnroll, LoopOptsShenandoahExpand, LoopOptsSkipSplitIf, - LoopOptsVerify + LoopOptsVerify, + PostLoopOptsExpandReachabilityFences }; // The type of all node counts and indexes. @@ -385,6 +387,7 @@ class Compile : public Phase { // of Template Assertion Predicates themselves. 
GrowableArray _template_assertion_predicate_opaques; GrowableArray _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common + GrowableArray _reachability_fences; // List of reachability fences GrowableArray _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over GrowableArray _for_merge_stores_igvn; // List of nodes for IGVN merge stores GrowableArray _unstable_if_traps; // List of ifnodes after IGVN @@ -714,11 +717,13 @@ public: int template_assertion_predicate_count() const { return _template_assertion_predicate_opaques.length(); } int expensive_count() const { return _expensive_nodes.length(); } int coarsened_count() const { return _coarsened_locks.length(); } - Node* macro_node(int idx) const { return _macro_nodes.at(idx); } Node* expensive_node(int idx) const { return _expensive_nodes.at(idx); } + ReachabilityFenceNode* reachability_fence(int idx) const { return _reachability_fences.at(idx); } + int reachability_fences_count() const { return _reachability_fences.length(); } + ConnectionGraph* congraph() { return _congraph;} void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;} void add_macro_node(Node * n) { @@ -740,6 +745,14 @@ public: _expensive_nodes.remove_if_existing(n); } + void add_reachability_fence(ReachabilityFenceNode* rf) { + _reachability_fences.append(rf); + } + + void remove_reachability_fence(ReachabilityFenceNode* n) { + _reachability_fences.remove_if_existing(n); + } + void add_parse_predicate(ParsePredicateNode* n) { assert(!_parse_predicates.contains(n), "duplicate entry in Parse Predicate list"); _parse_predicates.append(n); @@ -1300,6 +1313,9 @@ public: // Definitions of pd methods static void pd_compiler2_init(); + // Materialize reachability fences from reachability edges on safepoints. 
+ void expand_reachability_edges(Unique_Node_List& safepoints); + // Static parse-time type checking logic for gen_subtype_check: enum SubTypeCheckResult { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test }; SubTypeCheckResult static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip = StressReflectiveCode); diff --git a/src/hotspot/share/opto/divnode.cpp b/src/hotspot/share/opto/divnode.cpp index ed72d8a11cf..b398ec27b80 100644 --- a/src/hotspot/share/opto/divnode.cpp +++ b/src/hotspot/share/opto/divnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -419,8 +419,7 @@ static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divis if (!d_pos) { q = new SubLNode(phase->longcon(0), phase->transform(q)); } - } else if ( !Matcher::use_asm_for_ldiv_by_con(d) ) { // Use hardware DIV instruction when - // it is faster than code generated below. 
+ } else { // Attempt the jlong constant divide -> multiply transform found in // "Division by Invariant Integers using Multiplication" // by Granlund and Montgomery @@ -1592,41 +1591,25 @@ const Type* ModDNode::get_result_if_constant(const Type* dividend, const Type* d return TypeD::make(jdouble_cast(xr)); } -Node* ModFloatingNode::Ideal(PhaseGVN* phase, bool can_reshape) { - if (can_reshape) { - PhaseIterGVN* igvn = phase->is_IterGVN(); - - // Either input is TOP ==> the result is TOP - const Type* dividend_type = phase->type(dividend()); - const Type* divisor_type = phase->type(divisor()); - if (dividend_type == Type::TOP || divisor_type == Type::TOP) { - return phase->C->top(); - } - const Type* constant_result = get_result_if_constant(dividend_type, divisor_type); - if (constant_result != nullptr) { - return make_tuple_of_input_state_and_constant_result(igvn, constant_result); - } +const Type* ModFloatingNode::Value(PhaseGVN* phase) const { + const Type* t = CallLeafPureNode::Value(phase); + if (t == Type::TOP) { return Type::TOP; } + const Type* dividend_type = phase->type(dividend()); + const Type* divisor_type = phase->type(divisor()); + if (dividend_type == Type::TOP || divisor_type == Type::TOP) { + return Type::TOP; } - - return CallLeafPureNode::Ideal(phase, can_reshape); -} - -/* Give a tuple node for ::Ideal to return, made of the input state (control to return addr) - * and the given constant result. Idealization of projections will make sure to transparently - * propagate the input state and replace the result by the said constant. 
- */ -TupleNode* ModFloatingNode::make_tuple_of_input_state_and_constant_result(PhaseIterGVN* phase, const Type* con) const { - Node* con_node = phase->makecon(con); - TupleNode* tuple = TupleNode::make( - tf()->range(), - in(TypeFunc::Control), - in(TypeFunc::I_O), - in(TypeFunc::Memory), - in(TypeFunc::FramePtr), - in(TypeFunc::ReturnAdr), - con_node); - - return tuple; + const Type* constant_result = get_result_if_constant(dividend_type, divisor_type); + if (constant_result != nullptr) { + const TypeTuple* tt = t->is_tuple(); + uint cnt = tt->cnt(); + uint param_cnt = cnt - TypeFunc::Parms; + const Type** fields = TypeTuple::fields(param_cnt); + fields[TypeFunc::Parms] = constant_result; + if (param_cnt > 1) { fields[TypeFunc::Parms + 1] = Type::HALF; } + return TypeTuple::make(cnt, fields); + } + return t; } //============================================================================= diff --git a/src/hotspot/share/opto/divnode.hpp b/src/hotspot/share/opto/divnode.hpp index 43432b271a4..2598429716f 100644 --- a/src/hotspot/share/opto/divnode.hpp +++ b/src/hotspot/share/opto/divnode.hpp @@ -175,8 +175,6 @@ public: // Base class for float and double modulus class ModFloatingNode : public CallLeafPureNode { - TupleNode* make_tuple_of_input_state_and_constant_result(PhaseIterGVN* phase, const Type* con) const; - protected: virtual Node* dividend() const = 0; virtual Node* divisor() const = 0; @@ -184,7 +182,7 @@ protected: public: ModFloatingNode(Compile* C, const TypeFunc* tf, address addr, const char* name); - Node* Ideal(PhaseGVN* phase, bool can_reshape) override; + const Type* Value(PhaseGVN* phase) const override; }; // Float Modulus diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index e4418631d17..d6e75f17f50 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "opto/callGenerator.hpp" #include "opto/castnode.hpp" #include "opto/cfgnode.hpp" +#include "opto/graphKit.hpp" #include "opto/mulnode.hpp" #include "opto/parse.hpp" #include "opto/rootnode.hpp" @@ -909,8 +910,7 @@ void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) { if (handler_bci < 0) { // merge with corresponding rethrow node throw_to_exit(make_exception_state(ex_oop)); } else { // Else jump to corresponding handle - push_ex_oop(ex_oop); // Clear stack and push just the oop. - merge_exception(handler_bci); + push_and_merge_exception(handler_bci, ex_oop); } } @@ -1008,13 +1008,10 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { int handler_bci = handler->handler_bci(); if (remaining == 1) { - push_ex_oop(ex_node); // Push exception oop for handler if (PrintOpto && WizardMode) { tty->print_cr(" Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci); } - // If this is a backwards branch in the bytecodes, add safepoint - maybe_add_safepoint(handler_bci); - merge_exception(handler_bci); // jump to handler + push_and_merge_exception(handler_bci, ex_node); // jump to handler return; // No more handling to be done here! 
} @@ -1039,15 +1036,13 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr(); assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness"); Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst)); - push_ex_oop(ex_oop); // Push exception oop for handler if (PrintOpto && WizardMode) { tty->print(" Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci); klass->print_name(); tty->cr(); } // If this is a backwards branch in the bytecodes, add safepoint - maybe_add_safepoint(handler_bci); - merge_exception(handler_bci); + push_and_merge_exception(handler_bci, ex_oop); } set_control(not_subtype_ctrl); @@ -1086,13 +1081,13 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { #ifndef PRODUCT void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) { - if( CountCompiledCalls ) { - if( at_method_entry ) { + if (CountCompiledCalls) { + if (at_method_entry) { // bump invocation counter if top method (for statistics) if (CountCompiledCalls && depth() == 1) { const TypePtr* addr_type = TypeMetadataPtr::make(method()); Node* adr1 = makecon(addr_type); - Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset())); + Node* adr2 = off_heap_plus_addr(adr1, in_bytes(Method::compiled_invocation_counter_offset())); increment_counter(adr2); } } else if (is_inline) { diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp index 5befdd924ff..a05ad0ef99a 100644 --- a/src/hotspot/share/opto/escape.cpp +++ b/src/hotspot/share/opto/escape.cpp @@ -780,7 +780,7 @@ Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr base = base->find_out_with(Op_CastPP); } - Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset))); + Node* addr = 
_igvn->transform(AddPNode::make_with_base(base, curr_addp->in(AddPNode::Offset))); Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory; Node* load = curr_load->clone(); load->set_req(0, nullptr); @@ -933,7 +933,7 @@ void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, Growabl j = MIN2(j, (int)use->outcnt()-1); } - _igvn->remove_dead_node(use); + _igvn->remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph); } --i; i = MIN2(i, (int)curr_castpp->outcnt()-1); @@ -1273,21 +1273,33 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No for (uint spi = 0; spi < safepoints.size(); spi++) { SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint(); - JVMState *jvms = sfpt->jvms(); - uint merge_idx = (sfpt->req() - jvms->scloff()); - int debug_start = jvms->debug_start(); + + SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(*_igvn); + + // All sfpt inputs are implicitly included into debug info during the scalarization process below. + // Keep non-debug inputs separately, so they stay non-debug. + sfpt->remove_non_debug_edges(non_debug_edges_worklist); + + JVMState* jvms = sfpt->jvms(); + uint merge_idx = (sfpt->req() - jvms->scloff()); + int debug_start = jvms->debug_start(); SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx); smerge->init_req(0, _compile->root()); _igvn->register_new_node_with_optimizer(smerge); + assert(sfpt->jvms()->endoff() == sfpt->req(), "no extra edges past debug info allowed"); + // The next two inputs are: // (1) A copy of the original pointer to NSR objects. // (2) A selector, used to decide if we need to rematerialize an object // or use the pointer to a NSR object. - // See more details of these fields in the declaration of SafePointScalarMergeNode + // See more details of these fields in the declaration of SafePointScalarMergeNode. 
+ // It is safe to include them into debug info straight away since create_scalarized_object_description() + // will include all newly added inputs into debug info anyway. sfpt->add_req(nsr_merge_pointer); sfpt->add_req(selector); + sfpt->jvms()->set_endoff(sfpt->req()); for (uint i = 1; i < ophi->req(); i++) { Node* base = ophi->in(i); @@ -1302,13 +1314,15 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No AllocateNode* alloc = ptn->ideal_node()->as_Allocate(); SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt); if (sobj == nullptr) { - return false; + sfpt->restore_non_debug_edges(non_debug_edges_worklist); + return false; // non-recoverable failure; recompile } // Now make a pass over the debug information replacing any references // to the allocated object with "sobj" Node* ccpp = alloc->result_cast(); sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn); + non_debug_edges_worklist.remove_edge_if_present(ccpp); // drop scalarized input from non-debug info // Register the scalarized object as a candidate for reallocation smerge->add_req(sobj); @@ -1316,11 +1330,15 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge" sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn); + non_debug_edges_worklist.remove_edge_if_present(original_sfpt_parent); // drop scalarized input from non-debug info // The call to 'replace_edges_in_range' above might have removed the // reference to ophi that we need at _merge_pointer_idx. The line below make // sure the reference is maintained. 
sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer); + + sfpt->restore_non_debug_edges(non_debug_edges_worklist); + _igvn->_worklist.push(sfpt); } @@ -4712,6 +4730,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_SubTypeCheck || op == Op_ReinterpretS2HF || + op == Op_ReachabilityFence || BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) { n->dump(); use->dump(); diff --git a/src/hotspot/share/opto/gcm.cpp b/src/hotspot/share/opto/gcm.cpp index 4a1553b1e00..e3d3108a22e 100644 --- a/src/hotspot/share/opto/gcm.cpp +++ b/src/hotspot/share/opto/gcm.cpp @@ -152,9 +152,12 @@ bool PhaseCFG::is_CFG(Node* n) { } bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const { - bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL); - assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) - || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0"); + bool result = n->is_ReachabilityFence() || + (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || + (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL); + assert(!n->is_Proj() || + n->as_Proj()->bottom_type() != Type::CONTROL || + n->as_Proj()->_con == 0, "If control projection, it must be projection 0"); return result; } diff --git a/src/hotspot/share/opto/generateOptoStub.cpp b/src/hotspot/share/opto/generateOptoStub.cpp index 77633857cdf..d719c301dc6 100644 --- a/src/hotspot/share/opto/generateOptoStub.cpp +++ b/src/hotspot/share/opto/generateOptoStub.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -88,18 +88,17 @@ void GraphKit::gen_stub(address C_function, const int NoAlias = Compile::AliasIdxBot; - Node* adr_last_Java_pc = basic_plus_adr(top(), - thread, - in_bytes(JavaThread::frame_anchor_offset()) + - in_bytes(JavaFrameAnchor::last_Java_pc_offset())); + Node* adr_last_Java_pc = off_heap_plus_addr(thread, + in_bytes(JavaThread::frame_anchor_offset()) + + in_bytes(JavaFrameAnchor::last_Java_pc_offset())); // Drop in the last_Java_sp. last_Java_fp is not touched. // Always do this after the other "last_Java_frame" fields are set since // as soon as last_Java_sp != nullptr the has_last_Java_frame is true and // users will look at the other fields. // - Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset())); - Node *last_sp = frameptr(); + Node* adr_sp = off_heap_plus_addr(thread, in_bytes(JavaThread::last_Java_sp_offset())); + Node* last_sp = frameptr(); store_to_memory(control(), adr_sp, last_sp, T_ADDRESS, MemNode::unordered); // Set _thread_in_native @@ -228,7 +227,7 @@ void GraphKit::gen_stub(address C_function, Node* target = map()->in(TypeFunc::Parms); // Runtime call returning oop in TLS? 
Fetch it out if( pass_tls ) { - Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_oop_offset())); + Node* adr = off_heap_plus_addr(thread, in_bytes(JavaThread::vm_result_oop_offset())); Node* vm_result = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered); map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result // clear thread-local-storage(tls) @@ -237,7 +236,7 @@ void GraphKit::gen_stub(address C_function, //----------------------------- // check exception - Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset())); + Node* adr = off_heap_plus_addr(thread, in_bytes(Thread::pending_exception_offset())); Node* pending = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered); Node* exit_memory = reset_memory(); diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index c969abb85bb..bbd00d111f7 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -41,6 +41,7 @@ #include "opto/machnode.hpp" #include "opto/opaquenode.hpp" #include "opto/parse.hpp" +#include "opto/reachability.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/subtypenode.hpp" @@ -508,7 +509,7 @@ void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptR // first must access the should_post_on_exceptions_flag in this thread's JavaThread Node* jthread = _gvn.transform(new ThreadLocalNode()); - Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset())); + Node* adr = off_heap_plus_addr(jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset())); Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered); // Test the should_post_on_exceptions_flag vs. 
0 @@ -670,6 +671,48 @@ ciInstance* GraphKit::builtin_throw_exception(Deoptimization::DeoptReason reason } } +GraphKit::SavedState::SavedState(GraphKit* kit) : + _kit(kit), + _sp(kit->sp()), + _jvms(kit->jvms()), + _map(kit->clone_map()), + _discarded(false) +{ + for (DUIterator_Fast imax, i = kit->control()->fast_outs(imax); i < imax; i++) { + Node* out = kit->control()->fast_out(i); + if (out->is_CFG()) { + _ctrl_succ.push(out); + } + } +} + +GraphKit::SavedState::~SavedState() { + if (_discarded) { + _kit->destruct_map_clone(_map); + return; + } + _kit->jvms()->set_map(_map); + _kit->jvms()->set_sp(_sp); + _map->set_jvms(_kit->jvms()); + _kit->set_map(_map); + _kit->set_sp(_sp); + for (DUIterator_Fast imax, i = _kit->control()->fast_outs(imax); i < imax; i++) { + Node* out = _kit->control()->fast_out(i); + if (out->is_CFG() && out->in(0) == _kit->control() && out != _kit->map() && !_ctrl_succ.member(out)) { + _kit->_gvn.hash_delete(out); + out->set_req(0, _kit->C->top()); + _kit->C->record_for_igvn(out); + --i; --imax; + _kit->_gvn.hash_find_insert(out); + } + } +} + +void GraphKit::SavedState::discard() { + _discarded = true; +} + + //----------------------------PreserveJVMState--------------------------------- PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) { DEBUG_ONLY(kit->verify_map()); @@ -1192,7 +1235,7 @@ Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) { "Unexpected zero offset - should have matched MakeConX(0)"); } #endif - return _gvn.transform( new AddPNode(base, ptr, offset) ); + return _gvn.transform(AddPNode::make_with_base(base, ptr, offset)); } Node* GraphKit::ConvI2L(Node* offset) { @@ -1678,13 +1721,22 @@ Node* GraphKit::access_load_at(Node* obj, // containing obj return top(); // Dead path ? 
} + SavedState old_state(this); C2AccessValuePtr addr(adr, adr_type); C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr); + Node* load; if (access.is_raw()) { - return _barrier_set->BarrierSetC2::load_at(access, val_type); + load = _barrier_set->BarrierSetC2::load_at(access, val_type); } else { - return _barrier_set->load_at(access, val_type); + load = _barrier_set->load_at(access, val_type); } + + // Restore the previous state only if the load got folded to a constant + // and we can discard any barriers that might have been added. + if (load == nullptr || !load->is_Con()) { + old_state.discard(); + } + return load; } Node* GraphKit::access_load(Node* adr, // actual address to load val at @@ -1695,13 +1747,22 @@ Node* GraphKit::access_load(Node* adr, // actual address to load val at return top(); // Dead path ? } + SavedState old_state(this); C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr()); C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr); + Node* load; if (access.is_raw()) { - return _barrier_set->BarrierSetC2::load_at(access, val_type); + load = _barrier_set->BarrierSetC2::load_at(access, val_type); } else { - return _barrier_set->load_at(access, val_type); + load = _barrier_set->load_at(access, val_type); } + + // Restore the previous state only if the load got folded to a constant + // and we can discard any barriers that might have been added. + if (load == nullptr || !load->is_Con()) { + old_state.discard(); + } + return load; } Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj, @@ -2757,9 +2818,9 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No // will always succeed. We could leave a dependency behind to ensure this. 
// First load the super-klass's check-offset - Node *p1 = gvn.transform(new AddPNode(C->top(), superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset())))); + Node* p1 = gvn.transform(AddPNode::make_off_heap(superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset())))); Node* m = C->immutable_memory(); - Node *chk_off = gvn.transform(new LoadINode(nullptr, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); + Node* chk_off = gvn.transform(new LoadINode(nullptr, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); const TypeInt* chk_off_t = chk_off->Value(&gvn)->isa_int(); int chk_off_con = (chk_off_t != nullptr && chk_off_t->is_con()) ? chk_off_t->get_con() : cacheoff_con; @@ -2775,11 +2836,11 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No #ifdef _LP64 chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X)); #endif - Node* p2 = gvn.transform(new AddPNode(C->top(), subklass, chk_off_X)); + Node* p2 = gvn.transform(AddPNode::make_off_heap(subklass, chk_off_X)); // For some types like interfaces the following loadKlass is from a 1-word // cache which is mutable so can't use immutable memory. Other // types load from the super-class display table which is immutable. - Node *kmem = C->immutable_memory(); + Node* kmem = C->immutable_memory(); // secondary_super_cache is not immutable but can be treated as such because: // - no ideal node writes to it in a way that could cause an // incorrect/missed optimization of the following Load. 
@@ -2851,8 +2912,8 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No *ctrl = iftrue1; // We need exactly the 1 test above PhaseIterGVN* igvn = gvn.is_IterGVN(); if (igvn != nullptr) { - igvn->remove_globally_dead_node(r_ok_subtype); - igvn->remove_globally_dead_node(r_not_subtype); + igvn->remove_globally_dead_node(r_ok_subtype, PhaseIterGVN::NodeOrigin::Speculative); + igvn->remove_globally_dead_node(r_not_subtype, PhaseIterGVN::NodeOrigin::Speculative); } return not_subtype_ctrl; } @@ -3016,7 +3077,7 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculatin void GraphKit::guard_klass_being_initialized(Node* klass) { int init_state_off = in_bytes(InstanceKlass::init_state_offset()); - Node* adr = basic_plus_adr(top(), klass, init_state_off); + Node* adr = off_heap_plus_addr(klass, init_state_off); Node* init_state = LoadNode::make(_gvn, nullptr, immutable_memory(), adr, adr->bottom_type()->is_ptr(), TypeInt::BYTE, T_BYTE, MemNode::acquire); @@ -3034,7 +3095,7 @@ void GraphKit::guard_klass_being_initialized(Node* klass) { void GraphKit::guard_init_thread(Node* klass) { int init_thread_off = in_bytes(InstanceKlass::init_thread_offset()); - Node* adr = basic_plus_adr(top(), klass, init_thread_off); + Node* adr = off_heap_plus_addr(klass, init_thread_off); Node* init_thread = LoadNode::make(_gvn, nullptr, immutable_memory(), adr, adr->bottom_type()->is_ptr(), TypePtr::NOTNULL, @@ -3481,6 +3542,15 @@ Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precede return membar; } +//------------------------------insert_reachability_fence---------------------- +Node* GraphKit::insert_reachability_fence(Node* referent) { + assert(!referent->is_top(), ""); + Node* rf = _gvn.transform(new ReachabilityFenceNode(C, control(), referent)); + set_control(rf); + C->record_for_igvn(rf); + return rf; +} + //------------------------------shared_lock------------------------------------ // Emit locking code. 
FastLockNode* GraphKit::shared_lock(Node* obj) { @@ -3612,7 +3682,7 @@ Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { } } constant_value = Klass::_lh_neutral_value; // put in a known value - Node* lhp = basic_plus_adr(top(), klass_node, in_bytes(Klass::layout_helper_offset())); + Node* lhp = off_heap_plus_addr(klass_node, in_bytes(Klass::layout_helper_offset())); return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered); } diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp index 0537d31ae36..f53f73d0978 100644 --- a/src/hotspot/share/opto/graphKit.hpp +++ b/src/hotspot/share/opto/graphKit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -320,6 +320,13 @@ class GraphKit : public Phase { } Node* basic_plus_adr(Node* base, Node* ptr, Node* offset); + Node* off_heap_plus_addr(Node* ptr, intptr_t offset) { + return basic_plus_adr(top(), ptr, MakeConX(offset)); + } + + Node* off_heap_plus_addr(Node* ptr, Node* offset) { + return basic_plus_adr(top(), ptr, offset); + } // Some convenient shortcuts for common nodes Node* IfTrue(IfNode* iff) { return _gvn.transform(new IfTrueNode(iff)); } @@ -346,7 +353,7 @@ class GraphKit : public Phase { Node* CmpP(Node* l, Node* r) { return _gvn.transform(new CmpPNode(l, r)); } Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); } - Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new AddPNode(b, a, o)); } + Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(AddPNode::make_with_base(b, a, o)); } // Convert between int and long, and size_t. // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.) 
@@ -798,6 +805,7 @@ class GraphKit : public Phase { int next_monitor(); Node* insert_mem_bar(int opcode, Node* precedent = nullptr); Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr); + Node* insert_reachability_fence(Node* referent); // Optional 'precedent' is appended as an extra edge, to force ordering. FastLockNode* shared_lock(Node* obj); void shared_unlock(Node* box, Node* obj); @@ -876,6 +884,29 @@ class GraphKit : public Phase { Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false); Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem); Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem); + + // Helper class to support reverting to a previous parsing state. + // When an intrinsic makes changes before bailing out, it's necessary to restore the graph + // as it was. See JDK-8359344 for what can happen wrong. It's also not always possible to + // bailout before making changes because the bailing out decision might depend on new nodes + // (their types, for instance). + // + // So, if an intrinsic might cause this situation, one must start by saving the state in a + // SavedState by constructing it, and the state will be restored on destruction. If the + // intrinsic is not bailing out, one need to call discard to prevent restoring the old state. + class SavedState : public StackObj { + GraphKit* _kit; + int _sp; + JVMState* _jvms; + SafePointNode* _map; + Unique_Node_List _ctrl_succ; + bool _discarded; + + public: + SavedState(GraphKit*); + ~SavedState(); + void discard(); + }; }; // Helper class to support building of control flow branches. 
Upon diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp index 5070a9f00e1..563a914ea5c 100644 --- a/src/hotspot/share/opto/idealGraphPrinter.cpp +++ b/src/hotspot/share/opto/idealGraphPrinter.cpp @@ -40,16 +40,60 @@ class PrintProperties { private: IdealGraphPrinter* _printer; + void print_alias_properties(Node* node); + void print_escape_properties(Node* node); public: PrintProperties(IdealGraphPrinter* printer) : _printer(printer) {} void print_node_properties(Node* node); + void print_node_details(Node* node); void print_lrg_properties(const LRG& lrg, const char* buffer); void print_property(int flag, const char* name); void print_property(int flag, const char* name, const char* val); void print_property(int flag, const char* name, int val); }; +void PrintProperties::print_alias_properties(Node* node) { + const TypePtr* adr_type = node->adr_type(); + Compile* C = _printer->C; + if (adr_type != nullptr && C->have_alias_type(adr_type)) { + Compile::AliasType* at = C->alias_type(adr_type); + if (at != nullptr) { + print_property(true, "alias_index", at->index()); + // The value of at->field(), if present, is already dumped in the + // "source"/"destination" properties. + const Type* element = at->element(); + if (element != nullptr) { + stringStream element_stream; + element->dump_on(&element_stream); + print_property(true, "alias_element", element_stream.freeze()); + } + print_property(at->is_rewritable(), "alias_is_rewritable"); + print_property(at->is_volatile(), "alias_is_volatile"); + print_property(at->general_index() != at->index(), "alias_general_index", at->general_index()); + } + } +} + +void PrintProperties::print_escape_properties(Node* node) { + // Dump escape analysis state for relevant nodes. 
+ if (node->is_Allocate()) { + AllocateNode* alloc = node->as_Allocate(); + print_property(alloc->_is_scalar_replaceable, "is_scalar_replaceable"); + print_property(alloc->_is_non_escaping, "is_non_escaping"); + print_property(alloc->does_not_escape_thread(), "does_not_escape_thread"); + } + if (node->is_SafePoint() && node->as_SafePoint()->has_ea_local_in_scope()) { + print_property(true, "has_ea_local_in_scope"); + } + if (node->is_CallJava() && node->as_CallJava()->arg_escape()) { + print_property(true, "arg_escape"); + } + if (node->is_Initialize() && node->as_Initialize()->does_not_escape()) { + print_property(true, "does_not_escape"); + } +} + void PrintProperties::print_node_properties(Node* node) { const jushort flags = node->flags(); print_property((flags & Node::Flag_is_Copy), "is_copy"); @@ -75,6 +119,15 @@ void PrintProperties::print_node_properties(Node* node) { } } +void PrintProperties::print_node_details(Node* node) { + print_alias_properties(node); + + print_escape_properties(node); + + print_property(node->is_block_proj() != nullptr, "is_block_proj"); + print_property(node->is_block_start(), "is_block_start"); +} + void PrintProperties::print_lrg_properties(const LRG &lrg, const char *buffer) { print_property(true, "mask", buffer); print_property(true, "mask_size", lrg.mask_size()); @@ -651,61 +704,7 @@ void IdealGraphPrinter::visit_node(Node* n, bool edges) { assert(s2.size() < sizeof(buffer), "size in range"); print_prop("dump_spec", buffer); - const TypePtr* adr_type = node->adr_type(); - if (adr_type != nullptr && C->have_alias_type(adr_type)) { - Compile::AliasType* at = C->alias_type(adr_type); - if (at != nullptr) { - print_prop("alias_index", at->index()); - // The value of at->field(), if present, is already dumped in the - // "source"/"destination" properties. 
- const Type* element = at->element(); - if (element != nullptr) { - stringStream element_stream; - element->dump_on(&element_stream); - print_prop("alias_element", element_stream.freeze()); - } - if (at->is_rewritable()) { - print_prop("alias_is_rewritable", "true"); - } - if (at->is_volatile()) { - print_prop("alias_is_volatile", "true"); - } - if (at->general_index() != at->index()) { - print_prop("alias_general_index", at->general_index()); - } - } - } - - if (node->is_block_proj()) { - print_prop("is_block_proj", "true"); - } - - if (node->is_block_start()) { - print_prop("is_block_start", "true"); - } - - // Dump escape analysis state for relevant nodes. - if (node->is_Allocate()) { - AllocateNode* alloc = node->as_Allocate(); - if (alloc->_is_scalar_replaceable) { - print_prop("is_scalar_replaceable", "true"); - } - if (alloc->_is_non_escaping) { - print_prop("is_non_escaping", "true"); - } - if (alloc->does_not_escape_thread()) { - print_prop("does_not_escape_thread", "true"); - } - } - if (node->is_SafePoint() && node->as_SafePoint()->has_ea_local_in_scope()) { - print_prop("has_ea_local_in_scope", "true"); - } - if (node->is_CallJava() && node->as_CallJava()->arg_escape()) { - print_prop("arg_escape", "true"); - } - if (node->is_Initialize() && node->as_Initialize()->does_not_escape()) { - print_prop("does_not_escape", "true"); - } + print_node.print_node_details(node); const char *short_name = "short_name"; if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) { diff --git a/src/hotspot/share/opto/idealKit.hpp b/src/hotspot/share/opto/idealKit.hpp index 518c3b92136..a2ac9e204ba 100644 --- a/src/hotspot/share/opto/idealKit.hpp +++ b/src/hotspot/share/opto/idealKit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -200,7 +200,7 @@ class IdealKit: public StackObj { // Raw address should be transformed regardless 'delay_transform' flag // to produce canonical form CastX2P(offset). - Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new AddPNode(base, ptr, off)); } + Node* AddP(Node* base, Node* ptr, Node* off) { return _gvn.transform(AddPNode::make_with_base(base, ptr, off)); } Node* CmpP(Node* l, Node* r) { return transform(new CmpPNode(l, r)); } #ifdef _LP64 diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp index 762791d467d..ad8f0ced6ea 100644 --- a/src/hotspot/share/opto/ifnode.cpp +++ b/src/hotspot/share/opto/ifnode.cpp @@ -132,7 +132,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { cmp2->set_req(2,con2); const Type *t = cmp2->Value(igvn); // This compare is dead, so whack it! - igvn->remove_dead_node(cmp2); + igvn->remove_dead_node(cmp2, PhaseIterGVN::NodeOrigin::Speculative); if( !t->singleton() ) return nullptr; // No intervening control, like a simple Call @@ -443,7 +443,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { } l -= uses_found; // we deleted 1 or more copies of this edge } - igvn->remove_dead_node(p); + igvn->remove_dead_node(p, PhaseIterGVN::NodeOrigin::Graph); } // Force the original merge dead @@ -455,14 +455,14 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { r->set_req(0, nullptr); } else { assert(u->outcnt() == 0, "only dead users"); - igvn->remove_dead_node(u); + igvn->remove_dead_node(u, PhaseIterGVN::NodeOrigin::Graph); } l -= 1; } - igvn->remove_dead_node(r); + igvn->remove_dead_node(r, PhaseIterGVN::NodeOrigin::Graph); // Now remove the bogus extra edges used to keep things alive - igvn->remove_dead_node( hook ); + igvn->remove_dead_node(hook, PhaseIterGVN::NodeOrigin::Speculative); // Must return either the original node (now dead) or a new node // (Do not return a top here, since 
that would break the uniqueness of top.) @@ -905,6 +905,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN IfNode* dom_iff = proj->in(0)->as_If(); BoolNode* dom_bool = dom_iff->in(1)->as_Bool(); Node* lo = dom_iff->in(1)->in(1)->in(2); + Node* orig_lo = lo; Node* hi = this_cmp->in(2); Node* n = this_cmp->in(1); IfProjNode* otherproj = proj->other_if_proj(); @@ -916,6 +917,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN BoolTest::mask hi_test = this_bool->_test._test; BoolTest::mask cond = hi_test; + PhaseTransform::SpeculativeProgressGuard progress_guard(igvn); // convert: // // dom_bool = x {<,<=,>,>=} a @@ -1053,6 +1055,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN // previous if determines the result of this if so // replace Bool with constant igvn->replace_input_of(this, 1, igvn->intcon(success->_con)); + progress_guard.commit(); return true; } } @@ -1087,11 +1090,14 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN // min(limit, max(-2 + min_jint + 1, min_jint)) // = min(limit, min_jint) // = min_jint + if (lo != orig_lo && lo->outcnt() == 0) { + igvn->remove_dead_node(lo, PhaseIterGVN::NodeOrigin::Speculative); + } if (adjusted_val->outcnt() == 0) { - igvn->remove_dead_node(adjusted_val); + igvn->remove_dead_node(adjusted_val, PhaseIterGVN::NodeOrigin::Speculative); } if (adjusted_lim->outcnt() == 0) { - igvn->remove_dead_node(adjusted_lim); + igvn->remove_dead_node(adjusted_lim, PhaseIterGVN::NodeOrigin::Speculative); } igvn->C->record_for_post_loop_opts_igvn(this); return false; @@ -1103,6 +1109,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con)); igvn->replace_input_of(this, 1, newbool); + progress_guard.commit(); return true; } @@ -1592,11 +1599,11 @@ Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool 
prev_dom_not } } // End for each child of a projection - igvn->remove_dead_node(ifp); + igvn->remove_dead_node(ifp, PhaseIterGVN::NodeOrigin::Graph); } // End for each IfTrue/IfFalse child of If // Kill the IfNode - igvn->remove_dead_node(this); + igvn->remove_dead_node(this, PhaseIterGVN::NodeOrigin::Graph); // Must return either the original node (now dead) or a new node // (Do not return a top here, since that would break the uniqueness of top.) @@ -1758,7 +1765,7 @@ Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) { } if (bol->outcnt() == 0) { - igvn->remove_dead_node(bol); // Kill the BoolNode. + igvn->remove_dead_node(bol, PhaseIterGVN::NodeOrigin::Graph); // Kill the BoolNode. } return this; } @@ -1903,7 +1910,7 @@ static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) { Node *prior = igvn->hash_find_insert(iff); if( prior ) { - igvn->remove_dead_node(iff); + igvn->remove_dead_node(iff, PhaseIterGVN::NodeOrigin::Graph); iff = (IfNode*)prior; } else { // Cannot call transform on it just yet diff --git a/src/hotspot/share/opto/intrinsicnode.cpp b/src/hotspot/share/opto/intrinsicnode.cpp index c3e003ad8d3..3e235738e0f 100644 --- a/src/hotspot/share/opto/intrinsicnode.cpp +++ b/src/hotspot/share/opto/intrinsicnode.cpp @@ -273,7 +273,7 @@ static const Type* bitshuffle_value(const TypeInteger* src_type, const TypeInteg // result.lo = 0 if (maskcon != -1L) { int bitcount = population_count(static_cast(bt == T_INT ? maskcon & 0xFFFFFFFFL : maskcon)); - hi = right_n_bits_typed(bitcount); + hi = right_n_bits(bitcount); lo = 0L; } else { // preserve originally assigned hi (MAX_INT/LONG) and lo (MIN_INT/LONG) values @@ -376,7 +376,7 @@ static const Type* bitshuffle_value(const TypeInteger* src_type, const TypeInteg // Rule 3: // We can further constrain the upper bound of bit compression if the number of bits // which can be set(one) is less than the maximum number of bits of integral type. 
- hi = MIN2(right_n_bits_typed(result_bit_width), hi); + hi = MIN2(right_n_bits(result_bit_width), hi); } } else { assert(opc == Op_ExpandBits, ""); diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp index 53a503866fa..450d267e821 100644 --- a/src/hotspot/share/opto/lcm.cpp +++ b/src/hotspot/share/opto/lcm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -306,8 +306,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // cannot reason about it; is probably not implicit null exception } else { const TypePtr* tptr; - if ((UseCompressedOops && CompressedOops::shift() == 0) || - (UseCompressedClassPointers && CompressedKlassPointers::shift() == 0)) { + if ((UseCompressedOops && CompressedOops::shift() == 0) || CompressedKlassPointers::shift() == 0) { // 32-bits narrow oop can be the base of address expressions tptr = base->get_ptr_type(); } else { diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 3627d06a87a..ff7bc2c10d3 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -568,6 +568,7 @@ bool LibraryCallKit::try_to_inline(int predicate) { case vmIntrinsics::_Reference_get0: return inline_reference_get0(); case vmIntrinsics::_Reference_refersTo0: return inline_reference_refersTo0(false); + case vmIntrinsics::_Reference_reachabilityFence: return inline_reference_reachabilityFence(); case vmIntrinsics::_PhantomReference_refersTo0: return inline_reference_refersTo0(true); case vmIntrinsics::_Reference_clear0: return inline_reference_clear0(false); case vmIntrinsics::_PhantomReference_clear0: return inline_reference_clear0(true); @@ -843,6 
+844,22 @@ void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) { assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity"); } +RegionNode* LibraryCallKit::create_bailout() { + RegionNode* bailout = new RegionNode(1); + record_for_igvn(bailout); + return bailout; +} + +bool LibraryCallKit::check_bailout(RegionNode* bailout) { + if (bailout->req() > 1) { + bailout = _gvn.transform(bailout)->as_Region(); + Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr)); + Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic")); + C->root()->add_req(halt); + } + return stopped(); +} + //------------------------------generate_guard--------------------------- // Helper function for generating guarded fast-slow graph structures. // The given 'test', if true, guards a slow path. If the test fails @@ -951,36 +968,19 @@ void LibraryCallKit::generate_string_range_check(Node* array, Node* offset, Node* count, bool char_count, - bool halt_on_oob) { + RegionNode* region) { if (stopped()) { return; // already stopped } - RegionNode* bailout = new RegionNode(1); - record_for_igvn(bailout); if (char_count) { // Convert char count to byte count count = _gvn.transform(new LShiftINode(count, intcon(1))); } - // Offset and count must not be negative - generate_negative_guard(offset, bailout, nullptr, halt_on_oob); - generate_negative_guard(count, bailout, nullptr, halt_on_oob); + generate_negative_guard(offset, region, nullptr, true); + generate_negative_guard(count, region, nullptr, true); // Offset + count must not exceed length of array - generate_limit_guard(offset, count, load_array_length(array), bailout, halt_on_oob); - - if (bailout->req() > 1) { - if (halt_on_oob) { - bailout = _gvn.transform(bailout)->as_Region(); - Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr)); - Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in 
intrinsic")); - C->root()->add_req(halt); - } else { - PreserveJVMState pjvms(this); - set_control(_gvn.transform(bailout)); - uncommon_trap(Deoptimization::Reason_intrinsic, - Deoptimization::Action_maybe_recompile); - } - } + generate_limit_guard(offset, count, load_array_length(array), region, true); } Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset, @@ -990,7 +990,7 @@ Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_o = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull); Node* thread = _gvn.transform(new ThreadLocalNode()); - Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(handle_offset)); + Node* p = off_heap_plus_addr(thread, in_bytes(handle_offset)); tls_output = thread; Node* thread_obj_handle @@ -1139,10 +1139,6 @@ bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) { //------------------------------inline_countPositives------------------------------ // int java.lang.StringCoding#countPositives0(byte[] ba, int off, int len) bool LibraryCallKit::inline_countPositives() { - if (too_many_traps(Deoptimization::Reason_intrinsic)) { - return false; - } - assert(callee()->signature()->size() == 3, "countPositives has 3 parameters"); // no receiver since it is static method Node* ba = argument(0); @@ -1150,8 +1146,9 @@ bool LibraryCallKit::inline_countPositives() { Node* len = argument(2); ba = must_be_not_null(ba, true); - generate_string_range_check(ba, offset, len, false, true); - if (stopped()) { + RegionNode* bailout = create_bailout(); + generate_string_range_check(ba, offset, len, false, bailout); + if (check_bailout(bailout)) { return true; } @@ -1283,9 +1280,6 @@ bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) { //-----------------------------inline_string_indexOfI----------------------- bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) { - if 
(too_many_traps(Deoptimization::Reason_intrinsic)) { - return false; - } if (!Matcher::match_rule_supported(Op_StrIndexOf)) { return false; } @@ -1307,9 +1301,10 @@ bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) { Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE); // Range checks - generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, true); - generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, true); - if (stopped()) { + RegionNode* bailout = create_bailout(); + generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, bailout); + generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, bailout); + if (check_bailout(bailout)) { return true; } @@ -1404,7 +1399,11 @@ bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) { Node* src_count = _gvn.transform(new SubINode(max, from_index)); // Range checks - generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, true); + RegionNode* bailout = create_bailout(); + generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, bailout); + if (check_bailout(bailout)) { + return true; + } // Check for int_ch >= 0 Node* int_ch_cmp = _gvn.transform(new CmpINode(int_ch, intcon(0))); @@ -1454,9 +1453,6 @@ bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) { // void StringLatin1.inflate0(byte[] src, int srcOff, char[] dst, int dstOff, int len) // void StringLatin1.inflate0(byte[] src, int srcOff, byte[] dst, int dstOff, int len) bool LibraryCallKit::inline_string_copy(bool compress) { - if (too_many_traps(Deoptimization::Reason_intrinsic)) { - return false; - } int nargs = 5; // 2 oops, 3 ints assert(callee()->signature()->size() == nargs, "string copy has 5 arguments"); @@ -1495,9 +1491,10 @@ bool LibraryCallKit::inline_string_copy(bool compress) { } // Range checks - 
generate_string_range_check(src, src_offset, length, convert_src, true); - generate_string_range_check(dst, dst_offset, length, convert_dst, true); - if (stopped()) { + RegionNode* bailout = create_bailout(); + generate_string_range_check(src, src_offset, length, convert_src, bailout); + generate_string_range_check(dst, dst_offset, length, convert_dst, bailout); + if (check_bailout(bailout)) { return true; } @@ -1545,12 +1542,10 @@ bool LibraryCallKit::inline_string_copy(bool compress) { #endif //_LP64 //------------------------inline_string_toBytesU-------------------------- -// public static byte[] StringUTF16.toBytes(char[] value, int off, int len) +// public static byte[] StringUTF16.toBytes0(char[] value, int off, int len) bool LibraryCallKit::inline_string_toBytesU() { - if (too_many_traps(Deoptimization::Reason_intrinsic)) { - return false; - } // Get the arguments. + assert(callee()->signature()->size() == 3, "character array encoder requires 3 arguments"); Node* value = argument(0); Node* offset = argument(1); Node* length = argument(2); @@ -1558,30 +1553,18 @@ bool LibraryCallKit::inline_string_toBytesU() { Node* newcopy = nullptr; // Set the original stack and the reexecute bit for the interpreter to reexecute - // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens. + // the bytecode that invokes StringUTF16.toBytes0() if deoptimization happens. { PreserveReexecuteState preexecs(this); jvms()->set_should_reexecute(true); - // Check if a null path was taken unconditionally. 
- value = null_check(value); - - RegionNode* bailout = new RegionNode(1); - record_for_igvn(bailout); - - // Range checks - generate_negative_guard(offset, bailout); - generate_negative_guard(length, bailout); - generate_limit_guard(offset, length, load_array_length(value), bailout); + value = must_be_not_null(value, true); + RegionNode* bailout = create_bailout(); + generate_negative_guard(offset, bailout, nullptr, true); + generate_negative_guard(length, bailout, nullptr, true); + generate_limit_guard(offset, length, load_array_length(value), bailout, true); // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE - generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout); - - if (bailout->req() > 1) { - PreserveJVMState pjvms(this); - set_control(_gvn.transform(bailout)); - uncommon_trap(Deoptimization::Reason_intrinsic, - Deoptimization::Action_maybe_recompile); - } - if (stopped()) { + generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout, true); + if (check_bailout(bailout)) { return true; } @@ -1640,12 +1623,9 @@ bool LibraryCallKit::inline_string_toBytesU() { } //------------------------inline_string_getCharsU-------------------------- -// public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin) +// public void StringUTF16.getChars0(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin) bool LibraryCallKit::inline_string_getCharsU() { - if (too_many_traps(Deoptimization::Reason_intrinsic)) { - return false; - } - + assert(callee()->signature()->size() == 5, "StringUTF16.getChars0() has 5 arguments"); // Get the arguments. Node* src = argument(0); Node* src_begin = argument(1); @@ -1658,8 +1638,8 @@ bool LibraryCallKit::inline_string_getCharsU() { AllocateArrayNode* alloc = tightly_coupled_allocation(dst); // Check if a null path was taken unconditionally. 
- src = null_check(src); - dst = null_check(dst); + src = must_be_not_null(src, true); + dst = must_be_not_null(dst, true); if (stopped()) { return true; } @@ -1669,51 +1649,50 @@ bool LibraryCallKit::inline_string_getCharsU() { src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1))); // Range checks - generate_string_range_check(src, src_begin, length, true); - generate_string_range_check(dst, dst_begin, length, false); - if (stopped()) { + RegionNode* bailout = create_bailout(); + generate_string_range_check(src, src_begin, length, true, bailout); + generate_string_range_check(dst, dst_begin, length, false, bailout); + if (check_bailout(bailout)) { return true; } - if (!stopped()) { - // Calculate starting addresses. - Node* src_start = array_element_address(src, src_begin, T_BYTE); - Node* dst_start = array_element_address(dst, dst_begin, T_CHAR); + // Calculate starting addresses. + Node* src_start = array_element_address(src, src_begin, T_BYTE); + Node* dst_start = array_element_address(dst, dst_begin, T_CHAR); - // Check if array addresses are aligned to HeapWordSize - const TypeInt* tsrc = gvn().type(src_begin)->is_int(); - const TypeInt* tdst = gvn().type(dst_begin)->is_int(); - bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) && - tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0); + // Check if array addresses are aligned to HeapWordSize + const TypeInt* tsrc = gvn().type(src_begin)->is_int(); + const TypeInt* tdst = gvn().type(dst_begin)->is_int(); + bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) && + tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0); - // Figure out which arraycopy runtime method to 
call (disjoint, uninitialized). - const char* copyfunc_name = "arraycopy"; - address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true); - Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, - OptoRuntime::fast_arraycopy_Type(), - copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM, - src_start, dst_start, ConvI2X(length) XTOP); - // Do not let reads from the cloned object float above the arraycopy. - if (alloc != nullptr) { - if (alloc->maybe_set_complete(&_gvn)) { - // "You break it, you buy it." - InitializeNode* init = alloc->initialization(); - assert(init->is_complete(), "we just did this"); - init->set_complete_with_arraycopy(); - assert(dst->is_CheckCastPP(), "sanity"); - assert(dst->in(0)->in(0) == init, "dest pinned"); - } - // Do not let stores that initialize this object be reordered with - // a subsequent store that would make this object accessible by - // other threads. - // Record what AllocateNode this StoreStore protects so that - // escape analysis can go from the MemBarStoreStoreNode to the - // AllocateNode and eliminate the MemBarStoreStoreNode if possible - // based on the escape status of the AllocateNode. - insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); - } else { - insert_mem_bar(Op_MemBarCPUOrder); + // Figure out which arraycopy runtime method to call (disjoint, uninitialized). + const char* copyfunc_name = "arraycopy"; + address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true); + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, + OptoRuntime::fast_arraycopy_Type(), + copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM, + src_start, dst_start, ConvI2X(length) XTOP); + // Do not let reads from the cloned object float above the arraycopy. + if (alloc != nullptr) { + if (alloc->maybe_set_complete(&_gvn)) { + // "You break it, you buy it." 
+ InitializeNode* init = alloc->initialization(); + assert(init->is_complete(), "we just did this"); + init->set_complete_with_arraycopy(); + assert(dst->is_CheckCastPP(), "sanity"); + assert(dst->in(0)->in(0) == init, "dest pinned"); } + // Do not let stores that initialize this object be reordered with + // a subsequent store that would make this object accessible by + // other threads. + // Record what AllocateNode this StoreStore protects so that + // escape analysis can go from the MemBarStoreStoreNode to the + // AllocateNode and eliminate the MemBarStoreStoreNode if possible + // based on the escape status of the AllocateNode. + insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); + } else { + insert_mem_bar(Op_MemBarCPUOrder); } C->set_has_split_ifs(true); // Has chance for split-if optimization @@ -1725,9 +1704,16 @@ bool LibraryCallKit::inline_string_getCharsU() { // static void StringUTF16.putChar(byte[] val, int index, int c) // static char StringUTF16.getChar(byte[] val, int index) bool LibraryCallKit::inline_string_char_access(bool is_store) { + Node* ch; + if (is_store) { + assert(callee()->signature()->size() == 3, "StringUTF16.putChar() has 3 arguments"); + ch = argument(2); + } else { + assert(callee()->signature()->size() == 2, "StringUTF16.getChar() has 2 arguments"); + ch = nullptr; + } Node* value = argument(0); Node* index = argument(1); - Node* ch = is_store ? argument(2) : nullptr; // This intrinsic accesses byte[] array as char[] array. Computing the offsets // correctly requires matched array shapes. 
@@ -1834,61 +1820,17 @@ bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, c //------------------------------inline_math_pow----------------------------- bool LibraryCallKit::inline_math_pow() { + Node* base = argument(0); Node* exp = argument(2); - const TypeD* d = _gvn.type(exp)->isa_double_constant(); - if (d != nullptr) { - if (d->getd() == 2.0) { - // Special case: pow(x, 2.0) => x * x - Node* base = argument(0); - set_result(_gvn.transform(new MulDNode(base, base))); - return true; - } else if (d->getd() == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) { - // Special case: pow(x, 0.5) => sqrt(x) - Node* base = argument(0); - Node* zero = _gvn.zerocon(T_DOUBLE); - RegionNode* region = new RegionNode(3); - Node* phi = new PhiNode(region, Type::DOUBLE); - - Node* cmp = _gvn.transform(new CmpDNode(base, zero)); - // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0. - // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0). - // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0. - Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::le)); - - Node* if_pow = generate_slow_guard(test, nullptr); - Node* value_sqrt = _gvn.transform(new SqrtDNode(C, control(), base)); - phi->init_req(1, value_sqrt); - region->init_req(1, control()); - - if (if_pow != nullptr) { - set_control(if_pow); - address target = StubRoutines::dpow() != nullptr ? 
StubRoutines::dpow() : - CAST_FROM_FN_PTR(address, SharedRuntime::dpow); - const TypePtr* no_memory_effects = nullptr; - Node* trig = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), target, "POW", - no_memory_effects, base, top(), exp, top()); - Node* value_pow = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0)); -#ifdef ASSERT - Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1)); - assert(value_top == top(), "second value must be top"); -#endif - phi->init_req(2, value_pow); - region->init_req(2, _gvn.transform(new ProjNode(trig, TypeFunc::Control))); - } - - C->set_has_split_ifs(true); // Has chance for split-if optimization - set_control(_gvn.transform(region)); - record_for_igvn(region); - set_result(_gvn.transform(phi)); - - return true; - } - } - - return StubRoutines::dpow() != nullptr ? - runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") : - runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); + CallNode* pow = new PowDNode(C, base, exp); + set_predefined_input_for_runtime_call(pow); + pow = _gvn.transform(pow)->as_CallLeafPure(); + set_predefined_output_for_runtime_call(pow); + Node* result = _gvn.transform(new ProjNode(pow, TypeFunc::Parms + 0)); + record_for_igvn(pow); + set_result(result); + return true; } //------------------------------inline_math_native----------------------------- @@ -2176,7 +2118,7 @@ Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType t Node* uncasted_base = base; int kind = classify_unsafe_addr(uncasted_base, offset, type); if (kind == Type::RawPtr) { - return basic_plus_adr(top(), uncasted_base, offset); + return off_heap_plus_addr(uncasted_base, offset); } else if (kind == Type::AnyPtr) { assert(base == uncasted_base, "unexpected base change"); if (can_cast) { @@ -2196,13 +2138,13 @@ Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType t base = null_assert(base); Node* 
raw_base = _gvn.transform(new CastX2PNode(offset)); offset = MakeConX(0); - return basic_plus_adr(top(), raw_base, offset); + return off_heap_plus_addr(raw_base, offset); } } // We don't know if it's an on heap or off heap access. Fall back // to raw memory access. Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM)); - return basic_plus_adr(top(), raw, offset); + return off_heap_plus_addr(raw, offset); } else { assert(base == uncasted_base, "unexpected base change"); // We know it's an on heap access so base can't be null @@ -2393,47 +2335,6 @@ DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) { } } -LibraryCallKit::SavedState::SavedState(LibraryCallKit* kit) : - _kit(kit), - _sp(kit->sp()), - _jvms(kit->jvms()), - _map(kit->clone_map()), - _discarded(false) -{ - for (DUIterator_Fast imax, i = kit->control()->fast_outs(imax); i < imax; i++) { - Node* out = kit->control()->fast_out(i); - if (out->is_CFG()) { - _ctrl_succ.push(out); - } - } -} - -LibraryCallKit::SavedState::~SavedState() { - if (_discarded) { - _kit->destruct_map_clone(_map); - return; - } - _kit->jvms()->set_map(_map); - _kit->jvms()->set_sp(_sp); - _map->set_jvms(_kit->jvms()); - _kit->set_map(_map); - _kit->set_sp(_sp); - for (DUIterator_Fast imax, i = _kit->control()->fast_outs(imax); i < imax; i++) { - Node* out = _kit->control()->fast_out(i); - if (out->is_CFG() && out->in(0) == _kit->control() && out != _kit->map() && !_ctrl_succ.member(out)) { - _kit->_gvn.hash_delete(out); - out->set_req(0, _kit->C->top()); - _kit->C->record_for_igvn(out); - --i; --imax; - _kit->_gvn.hash_find_insert(out); - } - } -} - -void LibraryCallKit::SavedState::discard() { - _discarded = true; -} - bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) { if (callee()->is_static()) return false; // caller must have the capability! 
DecoratorSet decorators = C2_UNSAFE_ACCESS; @@ -2900,7 +2801,7 @@ bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) { insert_mem_bar(Op_StoreStoreFence); return true; case vmIntrinsics::_fullFence: - insert_mem_bar(Op_MemBarVolatile); + insert_mem_bar(Op_MemBarFull); return true; default: fatal_unexpected_iid(id); @@ -3018,7 +2919,7 @@ bool LibraryCallKit::inline_unsafe_allocate() { // Note: The argument might still be an illegal value like // Serializable.class or Object[].class. The runtime will handle it. // But we must make an explicit check for initialization. - Node* insp = basic_plus_adr(top(), kls, in_bytes(InstanceKlass::init_state_offset())); + Node* insp = off_heap_plus_addr(kls, in_bytes(InstanceKlass::init_state_offset())); // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler // can generate code to load it as unsigned byte. Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire); @@ -3062,26 +2963,27 @@ bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* func // slow path: runtime call // } bool LibraryCallKit::inline_native_vthread_start_transition(address funcAddr, const char* funcName, bool is_final_transition) { - Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true)); // VirtualThread this argument + Node* vt_oop = must_be_not_null(argument(0), true); // VirtualThread this argument IdealKit ideal(this); Node* thread = ideal.thread(); - Node* jt_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_in_vthread_transition_offset())); + Node* jt_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset())); Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset()); access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED); access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, 
IN_NATIVE | MO_UNORDERED); - insert_mem_bar(Op_MemBarVolatile); + insert_mem_bar(Op_MemBarStoreLoad); ideal.sync_kit(this); Node* global_disable_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::global_vthread_transition_disable_count_address())); Node* global_disable = ideal.load(ideal.ctrl(), global_disable_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, true /*require_atomic_access*/); Node* vt_disable_addr = basic_plus_adr(vt_oop, java_lang_Thread::vthread_transition_disable_count_offset()); - Node* vt_disable = ideal.load(ideal.ctrl(), vt_disable_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, true /*require_atomic_access*/); + const TypePtr* vt_disable_addr_t = _gvn.type(vt_disable_addr)->is_ptr(); + Node* vt_disable = ideal.load(ideal.ctrl(), vt_disable_addr, TypeInt::INT, T_INT, C->get_alias_index(vt_disable_addr_t), true /*require_atomic_access*/); Node* disabled = _gvn.transform(new AddINode(global_disable, vt_disable)); ideal.if_then(disabled, BoolTest::ne, ideal.ConI(0)); { sync_kit(ideal); - Node* is_mount = is_final_transition ? ideal.ConI(0) : _gvn.transform(argument(1)); + Node* is_mount = is_final_transition ? 
ideal.ConI(0) : argument(1); const TypeFunc* tf = OptoRuntime::vthread_transition_Type(); make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount); ideal.sync_kit(this); @@ -3093,7 +2995,7 @@ bool LibraryCallKit::inline_native_vthread_start_transition(address funcAddr, co } bool LibraryCallKit::inline_native_vthread_end_transition(address funcAddr, const char* funcName, bool is_first_transition) { - Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true)); // VirtualThread this argument + Node* vt_oop = must_be_not_null(argument(0), true); // VirtualThread this argument IdealKit ideal(this); Node* _notify_jvmti_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::notify_jvmti_events_address())); @@ -3101,13 +3003,13 @@ bool LibraryCallKit::inline_native_vthread_end_transition(address funcAddr, cons ideal.if_then(_notify_jvmti, BoolTest::eq, ideal.ConI(1)); { sync_kit(ideal); - Node* is_mount = is_first_transition ? ideal.ConI(1) : _gvn.transform(argument(1)); + Node* is_mount = is_first_transition ? 
ideal.ConI(1) : argument(1); const TypeFunc* tf = OptoRuntime::vthread_transition_Type(); make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount); ideal.sync_kit(this); } ideal.else_(); { Node* thread = ideal.thread(); - Node* jt_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_in_vthread_transition_offset())); + Node* jt_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset())); Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset()); sync_kit(ideal); @@ -3132,8 +3034,8 @@ bool LibraryCallKit::inline_native_notify_jvmti_sync() { { // unconditionally update the is_disable_suspend bit in current JavaThread Node* thread = ideal.thread(); - Node* arg = _gvn.transform(argument(0)); // argument for notification - Node* addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_disable_suspend_offset())); + Node* arg = argument(0); // argument for notification + Node* addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_disable_suspend_offset())); const TypePtr *addr_type = _gvn.type(addr)->isa_ptr(); sync_kit(ideal); @@ -3212,7 +3114,7 @@ bool LibraryCallKit::inline_native_classID() { ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1)))); } __ else_(); { // void class case - ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1))); + ideal.set(result, longcon(LAST_TYPE_ID + 1)); } __ end_if(); Node* signaled_flag_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::signal_address())); @@ -3240,12 +3142,12 @@ bool LibraryCallKit::inline_native_jvm_commit() { // TLS. Node* tls_ptr = _gvn.transform(new ThreadLocalNode()); // Jfr java buffer. 
- Node* java_buffer_offset = _gvn.transform(new AddPNode(top(), tls_ptr, _gvn.transform(MakeConX(in_bytes(JAVA_BUFFER_OFFSET_JFR))))); + Node* java_buffer_offset = _gvn.transform(AddPNode::make_off_heap(tls_ptr, MakeConX(in_bytes(JAVA_BUFFER_OFFSET_JFR)))); Node* java_buffer = _gvn.transform(new LoadPNode(control(), input_memory_state, java_buffer_offset, TypePtr::BOTTOM, TypeRawPtr::NOTNULL, MemNode::unordered)); - Node* java_buffer_pos_offset = _gvn.transform(new AddPNode(top(), java_buffer, _gvn.transform(MakeConX(in_bytes(JFR_BUFFER_POS_OFFSET))))); + Node* java_buffer_pos_offset = _gvn.transform(AddPNode::make_off_heap(java_buffer, MakeConX(in_bytes(JFR_BUFFER_POS_OFFSET)))); // Load the current value of the notified field in the JfrThreadLocal. - Node* notified_offset = basic_plus_adr(top(), tls_ptr, in_bytes(NOTIFY_OFFSET_JFR)); + Node* notified_offset = off_heap_plus_addr(tls_ptr, in_bytes(NOTIFY_OFFSET_JFR)); Node* notified = make_load(control(), notified_offset, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered); // Test for notification. @@ -3264,7 +3166,7 @@ bool LibraryCallKit::inline_native_jvm_commit() { // Iff notified, the return address of the commit method is the current position of the backing java buffer. This is used to reset the event writer. Node* current_pos_X = _gvn.transform(new LoadXNode(control(), input_memory_state, java_buffer_pos_offset, TypeRawPtr::NOTNULL, TypeX_X, MemNode::unordered)); // Convert the machine-word to a long. - Node* current_pos = _gvn.transform(ConvX2L(current_pos_X)); + Node* current_pos = ConvX2L(current_pos_X); // False branch, not notified. Node* not_notified = _gvn.transform(new IfFalseNode(iff_notified)); @@ -3274,7 +3176,7 @@ bool LibraryCallKit::inline_native_jvm_commit() { // Arg is the next position as a long. Node* arg = argument(0); // Convert long to machine-word. 
- Node* next_pos_X = _gvn.transform(ConvL2X(arg)); + Node* next_pos_X = ConvL2X(arg); // Store the next_position to the underlying jfr java buffer. store_to_memory(control(), java_buffer_pos_offset, next_pos_X, LP64_ONLY(T_LONG) NOT_LP64(T_INT), MemNode::release); @@ -3283,9 +3185,9 @@ bool LibraryCallKit::inline_native_jvm_commit() { set_all_memory(commit_memory); // Now load the flags from off the java buffer and decide if the buffer is a lease. If so, it needs to be returned post-commit. - Node* java_buffer_flags_offset = _gvn.transform(new AddPNode(top(), java_buffer, _gvn.transform(MakeConX(in_bytes(JFR_BUFFER_FLAGS_OFFSET))))); + Node* java_buffer_flags_offset = _gvn.transform(AddPNode::make_off_heap(java_buffer, MakeConX(in_bytes(JFR_BUFFER_FLAGS_OFFSET)))); Node* flags = make_load(control(), java_buffer_flags_offset, TypeInt::UBYTE, T_BYTE, MemNode::unordered); - Node* lease_constant = _gvn.transform(_gvn.intcon(4)); + Node* lease_constant = _gvn.intcon(4); // And flags with lease constant. Node* lease = _gvn.transform(new AndINode(flags, lease_constant)); @@ -3322,7 +3224,7 @@ bool LibraryCallKit::inline_native_jvm_commit() { lease_compare_rgn->init_req(_true_path, call_return_lease_control); lease_compare_rgn->init_req(_false_path, not_lease); - lease_compare_mem->init_req(_true_path, _gvn.transform(reset_memory())); + lease_compare_mem->init_req(_true_path, reset_memory()); lease_compare_mem->init_req(_false_path, commit_memory); lease_compare_io->init_req(_true_path, i_o()); @@ -3415,7 +3317,7 @@ bool LibraryCallKit::inline_native_getEventWriter() { Node* tls_ptr = _gvn.transform(new ThreadLocalNode()); // Load the address of java event writer jobject handle from the jfr_thread_local structure. - Node* jobj_ptr = basic_plus_adr(top(), tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR)); + Node* jobj_ptr = off_heap_plus_addr(tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR)); // Load the eventwriter jobject handle. 
Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered); @@ -3480,10 +3382,10 @@ bool LibraryCallKit::inline_native_getEventWriter() { IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD); // Mask off the excluded information from the epoch. - Node * vthread_is_excluded = _gvn.transform(new AndINode(vthread_epoch_raw, _gvn.transform(excluded_mask))); + Node * vthread_is_excluded = _gvn.transform(new AndINode(vthread_epoch_raw, excluded_mask)); // Branch on excluded to conditionalize updating the epoch for the virtual thread. - Node* is_excluded_cmp = _gvn.transform(new CmpINode(vthread_is_excluded, _gvn.transform(excluded_mask))); + Node* is_excluded_cmp = _gvn.transform(new CmpINode(vthread_is_excluded, excluded_mask)); Node* test_not_excluded = _gvn.transform(new BoolNode(is_excluded_cmp, BoolTest::ne)); IfNode* iff_not_excluded = create_and_map_if(control(), test_not_excluded, PROB_MAX, COUNT_UNKNOWN); @@ -3495,7 +3397,7 @@ bool LibraryCallKit::inline_native_getEventWriter() { set_control(included); // Get epoch value. - Node* epoch = _gvn.transform(new AndINode(vthread_epoch_raw, _gvn.transform(epoch_mask))); + Node* epoch = _gvn.transform(new AndINode(vthread_epoch_raw, epoch_mask)); // Load the current epoch generation. The value is unsigned 16-bit, so we type it as T_CHAR. Node* epoch_generation_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_generation_address())); @@ -3533,7 +3435,7 @@ bool LibraryCallKit::inline_native_getEventWriter() { // Update control and phi nodes. 
epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control); epoch_compare_rgn->init_req(_false_path, epoch_is_equal); - epoch_compare_mem->init_req(_true_path, _gvn.transform(reset_memory())); + epoch_compare_mem->init_req(_true_path, reset_memory()); epoch_compare_mem->init_req(_false_path, input_memory_state); epoch_compare_io->init_req(_true_path, i_o()); epoch_compare_io->init_req(_false_path, input_io_state); @@ -3574,11 +3476,11 @@ bool LibraryCallKit::inline_native_getEventWriter() { vthread_compare_mem->init_req(_false_path, input_memory_state); vthread_compare_io->init_req(_true_path, _gvn.transform(exclude_compare_io)); vthread_compare_io->init_req(_false_path, input_io_state); - tid->init_req(_true_path, _gvn.transform(vthread_tid)); - tid->init_req(_false_path, _gvn.transform(thread_obj_tid)); - exclusion->init_req(_true_path, _gvn.transform(vthread_is_excluded)); - exclusion->init_req(_false_path, _gvn.transform(threadObj_is_excluded)); - pinVirtualThread->init_req(_true_path, _gvn.transform(continuation_support)); + tid->init_req(_true_path, vthread_tid); + tid->init_req(_false_path, thread_obj_tid); + exclusion->init_req(_true_path, vthread_is_excluded); + exclusion->init_req(_false_path, threadObj_is_excluded); + pinVirtualThread->init_req(_true_path, continuation_support); pinVirtualThread->init_req(_false_path, _gvn.intcon(0)); // Update branch state. 
@@ -3592,7 +3494,7 @@ bool LibraryCallKit::inline_native_getEventWriter() { ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass(); const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter); const TypeOopPtr* const xtype = aklass->as_instance_type(); - Node* jobj_untagged = _gvn.transform(new AddPNode(top(), jobj, _gvn.MakeConX(-JNIHandles::TypeTag::global))); + Node* jobj_untagged = _gvn.transform(AddPNode::make_off_heap(jobj, _gvn.MakeConX(-JNIHandles::TypeTag::global))); Node* event_writer = access_load(jobj_untagged, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD); // Load the current thread id from the event writer object. @@ -3636,9 +3538,9 @@ bool LibraryCallKit::inline_native_getEventWriter() { // Update control and phi nodes. event_writer_tid_compare_rgn->init_req(_true_path, tid_is_not_equal); event_writer_tid_compare_rgn->init_req(_false_path, tid_is_equal); - event_writer_tid_compare_mem->init_req(_true_path, _gvn.transform(reset_memory())); + event_writer_tid_compare_mem->init_req(_true_path, reset_memory()); event_writer_tid_compare_mem->init_req(_false_path, _gvn.transform(vthread_compare_mem)); - event_writer_tid_compare_io->init_req(_true_path, _gvn.transform(i_o())); + event_writer_tid_compare_io->init_req(_true_path, i_o()); event_writer_tid_compare_io->init_req(_false_path, _gvn.transform(vthread_compare_io)); // Result of top level CFG, Memory, IO and Value. @@ -3653,14 +3555,14 @@ bool LibraryCallKit::inline_native_getEventWriter() { // Result memory. result_mem->init_req(_true_path, _gvn.transform(event_writer_tid_compare_mem)); - result_mem->init_req(_false_path, _gvn.transform(input_memory_state)); + result_mem->init_req(_false_path, input_memory_state); // Result IO. result_io->init_req(_true_path, _gvn.transform(event_writer_tid_compare_io)); - result_io->init_req(_false_path, _gvn.transform(input_io_state)); + result_io->init_req(_false_path, input_io_state); // Result value. 
- result_value->init_req(_true_path, _gvn.transform(event_writer)); // return event writer oop + result_value->init_req(_true_path, event_writer); // return event writer oop result_value->init_req(_false_path, null()); // return null // Set output state. @@ -3707,7 +3609,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) { IfNode* iff_thread_not_equal_carrierThread = create_and_map_if(control(), test_thread_not_equal_carrierThread, PROB_FAIR, COUNT_UNKNOWN); - Node* vthread_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR)); + Node* vthread_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR)); // False branch, is carrierThread. Node* thread_equal_carrierThread = _gvn.transform(new IfFalseNode(iff_thread_not_equal_carrierThread)); @@ -3726,17 +3628,17 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) { IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD); // Mask off the excluded information from the epoch. - Node * const is_excluded = _gvn.transform(new AndINode(epoch_raw, _gvn.transform(excluded_mask))); + Node * const is_excluded = _gvn.transform(new AndINode(epoch_raw, excluded_mask)); // Load the tid field from the thread. Node* tid = load_field_from_object(thread, "tid", "J"); // Store the vthread tid to the jfr thread local. - Node* thread_id_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR)); + Node* thread_id_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR)); Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true); // Branch is_excluded to conditionalize updating the epoch . 
- Node* excluded_cmp = _gvn.transform(new CmpINode(is_excluded, _gvn.transform(excluded_mask))); + Node* excluded_cmp = _gvn.transform(new CmpINode(is_excluded, excluded_mask)); Node* test_excluded = _gvn.transform(new BoolNode(excluded_cmp, BoolTest::eq)); IfNode* iff_excluded = create_and_map_if(control(), test_excluded, PROB_MIN, COUNT_UNKNOWN); @@ -3751,10 +3653,10 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) { Node* vthread_is_included = _gvn.intcon(0); // Get epoch value. - Node* epoch = _gvn.transform(new AndINode(epoch_raw, _gvn.transform(epoch_mask))); + Node* epoch = _gvn.transform(new AndINode(epoch_raw, epoch_mask)); // Store the vthread epoch to the jfr thread local. - Node* vthread_epoch_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR)); + Node* vthread_epoch_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR)); Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, MemNode::unordered, true); RegionNode* excluded_rgn = new RegionNode(PATH_LIMIT); @@ -3769,15 +3671,15 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) { excluded_rgn->init_req(_false_path, included); excluded_mem->init_req(_true_path, tid_memory); excluded_mem->init_req(_false_path, included_memory); - exclusion->init_req(_true_path, _gvn.transform(vthread_is_excluded)); - exclusion->init_req(_false_path, _gvn.transform(vthread_is_included)); + exclusion->init_req(_true_path, vthread_is_excluded); + exclusion->init_req(_false_path, vthread_is_included); // Set intermediate state. set_control(_gvn.transform(excluded_rgn)); set_all_memory(excluded_mem); // Store the vthread exclusion state to the jfr thread local. 
- Node* thread_local_excluded_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR)); + Node* thread_local_excluded_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR)); store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered, true); // Store release @@ -3823,16 +3725,15 @@ bool LibraryCallKit::inline_native_setCurrentThread() { "method changes current Thread but is not annotated ChangesCurrentThread"); Node* arr = argument(1); Node* thread = _gvn.transform(new ThreadLocalNode()); - Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset())); + Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::vthread_offset())); Node* thread_obj_handle = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered); - thread_obj_handle = _gvn.transform(thread_obj_handle); const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr(); access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED); // Change the _monitor_owner_id of the JavaThread Node* tid = load_field_from_object(arr, "tid", "J"); - Node* monitor_owner_id_offset = basic_plus_adr(top(), thread, in_bytes(JavaThread::monitor_owner_id_offset())); + Node* monitor_owner_id_offset = off_heap_plus_addr(thread, in_bytes(JavaThread::monitor_owner_id_offset())); store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true); JFR_ONLY(extend_setCurrentThread(thread, arr);) @@ -3853,7 +3754,7 @@ const Type* LibraryCallKit::scopedValueCache_type() { Node* LibraryCallKit::scopedValueCache_helper() { Node* thread = _gvn.transform(new ThreadLocalNode()); - Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset())); + Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::scopedValueCache_offset())); // We cannot 
use immutable_memory() because we might flip onto a // different carrier thread, at which point we'll need to use that // carrier thread's cache. @@ -3895,7 +3796,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { // TLS Node* tls_ptr = _gvn.transform(new ThreadLocalNode()); - Node* last_continuation_offset = basic_plus_adr(top(), tls_ptr, in_bytes(JavaThread::cont_entry_offset())); + Node* last_continuation_offset = off_heap_plus_addr(tls_ptr, in_bytes(JavaThread::cont_entry_offset())); Node* last_continuation = make_load(control(), last_continuation_offset, last_continuation_offset->get_ptr_type(), T_ADDRESS, MemNode::unordered); // Null check the last continuation object. @@ -3912,7 +3813,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { set_control(continuation_is_not_null); // Load the pin count from the last continuation. - Node* pin_count_offset = basic_plus_adr(top(), last_continuation, in_bytes(ContinuationEntry::pin_count_offset())); + Node* pin_count_offset = off_heap_plus_addr(last_continuation, in_bytes(ContinuationEntry::pin_count_offset())); Node* pin_count = make_load(control(), pin_count_offset, TypeInt::INT, T_INT, MemNode::unordered); // The loaded pin count is compared against a context specific rhs for over/underflow detection. 
@@ -3922,7 +3823,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { } else { pin_count_rhs = _gvn.intcon(UINT32_MAX); } - Node* pin_count_cmp = _gvn.transform(new CmpUNode(_gvn.transform(pin_count), pin_count_rhs)); + Node* pin_count_cmp = _gvn.transform(new CmpUNode(pin_count, pin_count_rhs)); Node* test_pin_count_over_underflow = _gvn.transform(new BoolNode(pin_count_cmp, BoolTest::eq)); IfNode* iff_pin_count_over_underflow = create_and_map_if(control(), test_pin_count_over_underflow, PROB_MIN, COUNT_UNKNOWN); @@ -3959,10 +3860,10 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM); record_for_igvn(result_mem); - result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count)); - result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null)); - result_mem->init_req(_true_path, _gvn.transform(reset_memory())); - result_mem->init_req(_false_path, _gvn.transform(input_memory_state)); + result_rgn->init_req(_true_path, valid_pin_count); + result_rgn->init_req(_false_path, continuation_is_null); + result_mem->init_req(_true_path, reset_memory()); + result_mem->init_req(_false_path, input_memory_state); // Set output state. set_control(_gvn.transform(result_rgn)); @@ -3974,7 +3875,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { //---------------------------load_mirror_from_klass---------------------------- // Given a klass oop, load its java mirror (a java.lang.Class oop). 
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { - Node* p = basic_plus_adr(top(), klass, in_bytes(Klass::java_mirror_offset())); + Node* p = off_heap_plus_addr(klass, in_bytes(Klass::java_mirror_offset())); Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); // mirror = ((OopHandle)mirror)->resolve(); return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); @@ -4014,7 +3915,7 @@ Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, i ByteSize offset, const Type* type, BasicType bt) { // Branch around if the given klass has the given modifier bit set. // Like generate_guard, adds a new path onto the region. - Node* modp = basic_plus_adr(top(), kls, in_bytes(offset)); + Node* modp = off_heap_plus_addr(kls, in_bytes(offset)); Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered); Node* mask = intcon(modifier_mask); Node* bits = intcon(modifier_bits); @@ -4148,7 +4049,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { phi->add_req(null()); } // If we fall through, it's a plain class. Get its _super. - p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset())); + p = off_heap_plus_addr(kls, in_bytes(Klass::super_offset())); kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL)); null_ctl = top(); kls = null_check_oop(kls, &null_ctl); @@ -4410,7 +4311,7 @@ Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) { // Keep track of the fact that 'obj' is an array to prevent // array specific accesses from floating above the guard. 
- *obj = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM)); + *obj = _gvn.transform(new CheckCastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM)); } return ctrl; } @@ -4683,10 +4584,10 @@ Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass, assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "bad index %d", vtable_index); // Get the Method* out of the appropriate vtable entry. - int entry_offset = in_bytes(Klass::vtable_start_offset()) + + int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes() + in_bytes(vtableEntry::method_offset()); - Node* entry_addr = basic_plus_adr(top(), obj_klass, entry_offset); + Node* entry_addr = off_heap_plus_addr(obj_klass, entry_offset); Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered); // Compare the target method with the expected method (e.g., Object.hashCode). @@ -5161,7 +5062,7 @@ bool LibraryCallKit::inline_unsafe_copyMemory() { Node* dst_addr = make_unsafe_address(dst_base, dst_off); Node* thread = _gvn.transform(new ThreadLocalNode()); - Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset())); + Node* doing_unsafe_access_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::doing_unsafe_access_offset())); BasicType doing_unsafe_access_bt = T_BYTE; assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented"); @@ -5216,7 +5117,7 @@ bool LibraryCallKit::inline_unsafe_setMemory() { Node* dst_addr = make_unsafe_address(dst_base, dst_off); Node* thread = _gvn.transform(new ThreadLocalNode()); - Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset())); + Node* doing_unsafe_access_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::doing_unsafe_access_offset())); BasicType doing_unsafe_access_bt = T_BYTE; assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented"); @@ 
-6225,9 +6126,10 @@ bool LibraryCallKit::inline_encodeISOArray(bool ascii) { } // Check source & target bounds - generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, true); - generate_string_range_check(dst, dst_offset, length, false, true); - if (stopped()) { + RegionNode* bailout = create_bailout(); + generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, bailout); + generate_string_range_check(dst, dst_offset, length, false, bailout); + if (check_bailout(bailout)) { return true; } @@ -6751,7 +6653,7 @@ bool LibraryCallKit::inline_updateCRC32() { Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr())); Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2))); - Node* adr = basic_plus_adr(top(), base, ConvI2X(offset)); + Node* adr = off_heap_plus_addr(base, ConvI2X(offset)); result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered); crc = _gvn.transform(new URShiftINode(crc, intcon(8))); @@ -6823,7 +6725,7 @@ bool LibraryCallKit::inline_updateByteBufferCRC32() { offset = ConvI2X(offset); // 'src_start' points to src array + scaled offset - Node* src_start = basic_plus_adr(top(), base, offset); + Node* src_start = off_heap_plus_addr(base, offset); // Call the stub. address stubAddr = StubRoutines::updateBytesCRC32(); @@ -6920,7 +6822,7 @@ bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() { offset = ConvI2X(offset); // 'src_start' points to src array + scaled offset - Node* src_start = basic_plus_adr(top(), base, offset); + Node* src_start = off_heap_plus_addr(base, offset); // static final int[] byteTable in class CRC32C Node* table = get_table_from_crc32c_class(callee()->holder()); @@ -7004,7 +6906,7 @@ bool LibraryCallKit::inline_updateByteBufferAdler32() { offset = ConvI2X(offset); // 'src_start' points to src array + scaled offset - Node* src_start = basic_plus_adr(top(), base, offset); + Node* src_start = off_heap_plus_addr(base, offset); // Call the stub. 
address stubAddr = StubRoutines::updateBytesAdler32(); @@ -7124,6 +7026,14 @@ bool LibraryCallKit::inline_reference_clear0(bool is_phantom) { return true; } +//-----------------------inline_reference_reachabilityFence----------------- +// bool java.lang.ref.Reference.reachabilityFence(); +bool LibraryCallKit::inline_reference_reachabilityFence() { + Node* referent = argument(0); + insert_reachability_fence(referent); + return true; +} + Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, DecoratorSet decorators, bool is_static, ciInstanceKlass* fromKls) { diff --git a/src/hotspot/share/opto/library_call.hpp b/src/hotspot/share/opto/library_call.hpp index 56141be2362..9aae48302cf 100644 --- a/src/hotspot/share/opto/library_call.hpp +++ b/src/hotspot/share/opto/library_call.hpp @@ -129,30 +129,9 @@ class LibraryCallKit : public GraphKit { virtual int reexecute_sp() { return _reexecute_sp; } - /* When an intrinsic makes changes before bailing out, it's necessary to restore the graph - * as it was. See JDK-8359344 for what can happen wrong. It's also not always possible to - * bailout before making changes because the bailing out decision might depend on new nodes - * (their types, for instance). - * - * So, if an intrinsic might cause this situation, one must start by saving the state in a - * SavedState by constructing it, and the state will be restored on destruction. If the - * intrinsic is not bailing out, one need to call discard to prevent restoring the old state. 
- */ - class SavedState { - LibraryCallKit* _kit; - uint _sp; - JVMState* _jvms; - SafePointNode* _map; - Unique_Node_List _ctrl_succ; - bool _discarded; - - public: - SavedState(LibraryCallKit*); - ~SavedState(); - void discard(); - }; - // Helper functions to inline natives + RegionNode* create_bailout(); + bool check_bailout(RegionNode* bailout); Node* generate_guard(Node* test, RegionNode* region, float true_prob); Node* generate_slow_guard(Node* test, RegionNode* region); Node* generate_fair_guard(Node* test, RegionNode* region); @@ -166,7 +145,7 @@ class LibraryCallKit : public GraphKit { bool with_opaque = false); void generate_string_range_check(Node* array, Node* offset, Node* length, bool char_count, - bool halt_on_oob = false); + RegionNode* region); Node* current_thread_helper(Node* &tls_output, ByteSize handle_offset, bool is_immutable); Node* generate_current_thread(Node* &tls_output); @@ -333,6 +312,7 @@ class LibraryCallKit : public GraphKit { bool inline_divmod_methods(vmIntrinsics::ID id); bool inline_reference_get0(); bool inline_reference_refersTo0(bool is_phantom); + bool inline_reference_reachabilityFence(); bool inline_reference_clear0(bool is_phantom); bool inline_Class_cast(); bool inline_aescrypt_Block(vmIntrinsics::ID id); diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp index 4e221a9a0ef..b65f90093ab 100644 --- a/src/hotspot/share/opto/loopTransform.cpp +++ b/src/hotspot/share/opto/loopTransform.cpp @@ -35,7 +35,9 @@ #include "opto/loopnode.hpp" #include "opto/movenode.hpp" #include "opto/mulnode.hpp" +#include "opto/node.hpp" #include "opto/opaquenode.hpp" +#include "opto/opcodes.hpp" #include "opto/phase.hpp" #include "opto/predicates.hpp" #include "opto/rootnode.hpp" @@ -50,17 +52,70 @@ // Given an IfNode, return the loop-exiting projection or null if both // arms remain in the loop. 
Node *IdealLoopTree::is_loop_exit(Node *iff) const { - if (iff->outcnt() != 2) return nullptr; // Ignore partially dead tests - PhaseIdealLoop *phase = _phase; + assert(iff->is_If(), "not an If: %s", iff->Name()); + assert(is_member(_phase->get_loop(iff)), "not related"); + + if (iff->outcnt() != 2) { + return nullptr; // Ignore partially dead tests + } // Test is an IfNode, has 2 projections. If BOTH are in the loop // we need loop unswitching instead of peeling. - if (!is_member(phase->get_loop(iff->raw_out(0)))) + if (!is_member(_phase->get_loop(iff->raw_out(0)))) { return iff->raw_out(0); - if (!is_member(phase->get_loop(iff->raw_out(1)))) + } + if (!is_member(_phase->get_loop(iff->raw_out(1)))) { return iff->raw_out(1); + } return nullptr; } +//------------------------------unique_loop_exit_or_null---------------------- +// Return the loop-exit projection if loop exit is unique. +IfFalseNode* IdealLoopTree::unique_loop_exit_proj_or_null() { + if (is_loop() && head()->is_BaseCountedLoop()) { + IfNode* loop_end = head()->as_BaseCountedLoop()->loopexit_or_null(); + if (loop_end == nullptr) { + return nullptr; // malformed loop shape + } + // Look for other loop exits. 
+ assert(_phase->is_dominator(head(), tail()), "sanity"); + for (Node* ctrl = tail(); ctrl != head(); ctrl = ctrl->in(0)) { + assert(is_member(_phase->get_loop(ctrl)), "sanity"); + if (ctrl->is_If()) { + if (!is_loop_exit(ctrl->as_If())) { + continue; // local branch + } else if (ctrl != loop_end) { + return nullptr; // multiple loop exits + } + } else if (ctrl->is_Region()) { + return nullptr; // give up on control flow merges + } else if (ctrl->is_ReachabilityFence() || + ctrl->is_SafePoint() || + ctrl->is_MemBar() || + ctrl->Opcode() == Op_Blackhole) { + continue; // skip + } else if (ctrl->is_Proj()) { + if (ctrl->is_IfProj() || + ctrl->Opcode() == Op_SCMemProj || + ctrl->Opcode() == Op_Proj) { + continue; // skip simple control projections + } else if (ctrl->is_CatchProj() || + ctrl->is_JumpProj()) { + return nullptr; // give up on control flow splits + } else { + assert(false, "unknown control projection: %s", ctrl->Name()); + return nullptr; // stop on unknown control node + } + } else { + assert(false, "unknown CFG node: %s", ctrl->Name()); + return nullptr; // stop on unknown control node + } + } + assert(is_loop_exit(loop_end), "not a loop exit?"); + return loop_end->false_proj_or_null(); + } + return nullptr; // not found or multiple loop exits +} //============================================================================= @@ -524,6 +579,9 @@ bool IdealLoopTree::policy_peeling(PhaseIdealLoop *phase) { // return the estimated loop size if peeling is applicable, otherwise return // zero. No node budget is allocated. uint IdealLoopTree::estimate_peeling(PhaseIdealLoop *phase) { + if (LoopPeeling != 1) { + return 0; + } // If nodes are depleted, some transform has miscalculated its needs. 
assert(!phase->exceeding_node_budget(), "sanity"); @@ -775,6 +833,7 @@ void PhaseIdealLoop::peeled_dom_test_elim(IdealLoopTree* loop, Node_List& old_ne // exit // void PhaseIdealLoop::do_peeling(IdealLoopTree *loop, Node_List &old_new) { + assert(LoopPeeling != 0, "do_peeling called with loop peeling always disabled"); C->set_major_progress(); // Peeling a 'main' loop in a pre/main/post situation obfuscates the @@ -1770,13 +1829,39 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree* loop, Node_List& old_new, for (DUIterator i = main_head->outs(); main_head->has_out(i); i++) { Node* main_phi = main_head->out(i); if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0) { - Node* cur_phi = old_new[main_phi->_idx]; + Node* post_phi = old_new[main_phi->_idx]; + Node* loopback_input = main_phi->in(LoopNode::LoopBackControl); Node* fallnew = clone_up_backedge_goo(main_head->back_control(), post_head->init_control(), - main_phi->in(LoopNode::LoopBackControl), + loopback_input, visited, clones); - _igvn.hash_delete(cur_phi); - cur_phi->set_req(LoopNode::EntryControl, fallnew); + // Technically, the entry value of post_phi must be the loop back input of the corresponding + // Phi of the outer loop, not the Phi of the inner loop (i.e. main_phi). However, we have not + // constructed the Phis for the OuterStripMinedLoop yet, so the input must be inferred from + // the loop back input of main_phi. + // - If post_phi is a data Phi, then we can use the loop back input of main_phi. + // - If post_phi is a memory Phi, since Stores can be sunk below the inner loop, but still + // inside the outer loop, we have 2 cases: + // + If the loop back input of main_phi is on the backedge, then the entry input of + // post_phi is the clone of the node on the entry of post_head, similar to when post_phi + // is a data Phi. 
+ // + If the loop back input of main_phi is not on the backedge, we need to find whether + // there is a sunk Store corresponding to post_phi, if there is any, the latest such + // store will be the entry input of post_phi. Fortunately, the safepoint at the exit of + // the outer loop captures all memory states, so we can use it as the entry input of + // post_phi. + // Another way to see it is that, the memory phi should capture the latest state at the + // post-loop entry. If loopback_input is cloned by clone_up_backedge_goo, it is pinned at + // the post-loop entry, and is surely the latest state. Otherwise, the latest memory state + // corresponding to post_phi is the memory state at the exit of the outer main-loop, which + // is captured by the safepoint there. + if (main_head->is_strip_mined() && fallnew == loopback_input && post_phi->is_memory_phi()) { + SafePointNode* main_safepoint = main_head->outer_safepoint(); + assert(main_safepoint != nullptr, "outer loop must have a safepoint"); + fallnew = main_safepoint->memory(); + } + _igvn.hash_delete(post_phi); + post_phi->set_req(LoopNode::EntryControl, fallnew); } } // Store nodes that were moved to the outer loop by PhaseIdealLoop::try_move_store_after_loop @@ -2201,6 +2286,15 @@ void PhaseIdealLoop::do_maximally_unroll(IdealLoopTree *loop, Node_List &old_new // If loop is tripping an odd number of times, peel odd iteration if ((cl->trip_count() & 1) == 1) { + if (LoopPeeling == 0) { +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("MaxUnroll cancelled since LoopPeeling is always disabled"); + loop->dump_head(); + } +#endif + return; + } do_peeling(loop, old_new); } @@ -3094,9 +3188,13 @@ static CountedLoopNode* locate_pre_from_main(CountedLoopNode* main_loop) { Node* ctrl = main_loop->skip_assertion_predicates_with_halt(); assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); Node* iffm = ctrl->in(0); - assert(iffm->Opcode() == Op_If, ""); + assert(iffm->Opcode() == Op_If, "%s", 
iffm->Name()); Node* p_f = iffm->in(0); - assert(p_f->Opcode() == Op_IfFalse, ""); + // Skip ReachabilityFences hoisted out of pre-loop. + while (p_f->is_ReachabilityFence()) { + p_f = p_f->in(0); + } + assert(p_f->Opcode() == Op_IfFalse, "%s", p_f->Name()); CountedLoopNode* pre_loop = p_f->in(0)->as_CountedLoopEnd()->loopnode(); assert(pre_loop->is_pre_loop(), "No pre loop found"); return pre_loop; @@ -3243,6 +3341,15 @@ bool IdealLoopTree::do_remove_empty_loop(PhaseIdealLoop *phase) { #endif if (needs_guard) { + if (LoopPeeling == 0) { +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("Empty loop not removed since LoopPeeling is always disabled"); + this->dump_head(); + } +#endif + return false; + } // Peel the loop to ensure there's a zero trip guard Node_List old_new; phase->do_peeling(this, old_new); @@ -3969,7 +4076,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { index = new LShiftXNode(index, shift->in(2)); _igvn.register_new_node_with_optimizer(index); } - Node* from = new AddPNode(base, base, index); + Node* from = AddPNode::make_with_base(base, index); _igvn.register_new_node_with_optimizer(from); // For normal array fills, C2 uses two AddP nodes for array element // addressing. 
But for array fills with Unsafe call, there's only one @@ -3977,7 +4084,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { assert(offset != nullptr || C->has_unsafe_access(), "Only array fills with unsafe have no extra offset"); if (offset != nullptr) { - from = new AddPNode(base, from, offset); + from = AddPNode::make_with_base(base, from, offset); _igvn.register_new_node_with_optimizer(from); } // Compute the number of elements to copy diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp index d68505836d4..35a9108892c 100644 --- a/src/hotspot/share/opto/loopnode.cpp +++ b/src/hotspot/share/opto/loopnode.cpp @@ -44,6 +44,7 @@ #include "opto/opaquenode.hpp" #include "opto/opcodes.hpp" #include "opto/predicates.hpp" +#include "opto/reachability.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/vectorization.hpp" @@ -375,17 +376,20 @@ IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(Node* init_control, return outer_ilt; } -void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj* loop_limit_check_parse_proj, - Node* cmp_limit, Node* bol) { + +void CountedLoopConverter::insert_loop_limit_check_predicate(const ParsePredicateSuccessProj* loop_limit_check_parse_proj, + Node* bol) const { assert(loop_limit_check_parse_proj->in(0)->is_ParsePredicate(), "must be parse predicate"); - Node* new_predicate_proj = create_new_if_for_predicate(loop_limit_check_parse_proj, nullptr, - Deoptimization::Reason_loop_limit_check, - Op_If); + Node* new_predicate_proj = _phase->create_new_if_for_predicate(loop_limit_check_parse_proj, nullptr, + Deoptimization::Reason_loop_limit_check, + Op_If); + + PhaseIterGVN& igvn = _phase->igvn(); Node* iff = new_predicate_proj->in(0); - cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit); - bol = _igvn.register_new_node_with_optimizer(bol); - set_subtree_ctrl(bol, false); - _igvn.replace_input_of(iff, 1, bol); + Node* cmp_limit = 
igvn.register_new_node_with_optimizer(bol->in(1)); + bol = igvn.register_new_node_with_optimizer(bol); + _phase->set_subtree_ctrl(bol, false); + igvn.replace_input_of(iff, 1, bol); #ifndef PRODUCT // report that the loop predication has been actually performed @@ -397,14 +401,38 @@ void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj #endif } -Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) { +void CountedLoopConverter::insert_stride_overflow_limit_check() const { + const jlong stride_con = _structure.stride_con(); + + jlong adjusted_stride_con = (stride_con > 0 + ? max_signed_integer(_iv_bt) + : min_signed_integer(_iv_bt)) - _structure.final_limit_correction(); + Node* cmp_limit = CmpNode::make(_structure.limit(), + _phase->igvn().integercon(adjusted_stride_con, _iv_bt), _iv_bt); + Node* bol = new BoolNode(cmp_limit, stride_con > 0 ? BoolTest::le : BoolTest::ge); + + insert_loop_limit_check_predicate(_head->in(LoopNode::EntryControl)->as_IfTrue(), bol); +} + +void CountedLoopConverter::insert_init_trip_limit_check() const { + const jlong stride_con = _structure.stride_con(); + + Node* cmp_limit = CmpNode::make(_structure.phi()->in(LoopNode::EntryControl), _structure.limit(), _iv_bt); + Node* bol = new BoolNode(cmp_limit, stride_con > 0 ? BoolTest::lt : BoolTest::gt); + + insert_loop_limit_check_predicate(_head->in(LoopNode::EntryControl)->as_IfTrue(), bol); +} + +Node* PhaseIdealLoop::loop_exit_control(const IdealLoopTree* loop) const { + Node* head = loop->_head; + // Counted loop head must be a good RegionNode with only 3 not null // control input edges: Self, Entry, LoopBack. 
- if (x->in(LoopNode::Self) == nullptr || x->req() != 3 || loop->_irreducible) { + if (head->in(LoopNode::Self) == nullptr || head->req() != 3 || loop->_irreducible) { return nullptr; } - Node *init_control = x->in(LoopNode::EntryControl); - Node *back_control = x->in(LoopNode::LoopBackControl); + Node* init_control = head->in(LoopNode::EntryControl); + Node* back_control = head->in(LoopNode::LoopBackControl); if (init_control == nullptr || back_control == nullptr) { // Partially dead return nullptr; } @@ -437,77 +465,7 @@ Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) { return iftrue; } -Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob) { - Node* iftrue = back_control; - uint iftrue_op = iftrue->Opcode(); - Node* iff = iftrue->in(0); - BoolNode* test = iff->in(1)->as_Bool(); - bt = test->_test._test; - cl_prob = iff->as_If()->_prob; - if (iftrue_op == Op_IfFalse) { - bt = BoolTest(bt).negate(); - cl_prob = 1.0 - cl_prob; - } - // Get backedge compare - Node* cmp = test->in(1); - if (!cmp->is_Cmp()) { - return nullptr; - } - - // Find the trip-counter increment & limit. Limit must be loop invariant. - incr = cmp->in(1); - limit = cmp->in(2); - - // --------- - // need 'loop()' test to tell if limit is loop invariant - // --------- - - if (!ctrl_is_member(loop, incr)) { // Swapped trip counter and limit? 
- Node* tmp = incr; // Then reverse order into the CmpI - incr = limit; - limit = tmp; - bt = BoolTest(bt).commute(); // And commute the exit test - } - if (ctrl_is_member(loop, limit)) { // Limit must be loop-invariant - return nullptr; - } - if (!ctrl_is_member(loop, incr)) { // Trip counter must be loop-variant - return nullptr; - } - return cmp; -} - -Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr) { - if (incr->is_Phi()) { - if (incr->as_Phi()->region() != x || incr->req() != 3) { - return nullptr; // Not simple trip counter expression - } - phi_incr = incr; - incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi - if (!ctrl_is_member(loop, incr)) { // Trip counter must be loop-variant - return nullptr; - } - } - return incr; -} - -Node* PhaseIdealLoop::loop_iv_stride(Node* incr, Node*& xphi) { - assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp."); - // Get merge point - xphi = incr->in(1); - Node *stride = incr->in(2); - if (!stride->is_Con()) { // Oops, swap these - if (!xphi->is_Con()) { // Is the other guy a constant? 
- return nullptr; // Nope, unknown stride, bail out - } - Node *tmp = xphi; // 'incr' is commutative, so ok to swap - xphi = stride; - stride = tmp; - } - return stride; -} - -PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x) { +PhiNode* PhaseIdealLoop::loop_iv_phi(const Node* xphi, const Node* phi_incr, const Node* head) { if (!xphi->is_Phi()) { return nullptr; // Too much math on the trip counter } @@ -517,44 +475,31 @@ PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x) { PhiNode *phi = xphi->as_Phi(); // Phi must be of loop header; backedge must wrap to increment - if (phi->region() != x) { + if (phi->region() != head) { return nullptr; } return phi; } -static int check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt) { +CountedLoopConverter::StrideOverflowState CountedLoopConverter::check_stride_overflow(jlong final_correction, + const TypeInteger* limit_t, + BasicType bt) { if (final_correction > 0) { if (limit_t->lo_as_long() > (max_signed_integer(bt) - final_correction)) { - return -1; + return Overflow; } if (limit_t->hi_as_long() > (max_signed_integer(bt) - final_correction)) { - return 1; + return RequireLimitCheck; } } else { if (limit_t->hi_as_long() < (min_signed_integer(bt) - final_correction)) { - return -1; + return Overflow; } if (limit_t->lo_as_long() < (min_signed_integer(bt) - final_correction)) { - return 1; + return RequireLimitCheck; } } - return 0; -} - -static bool condition_stride_ok(BoolTest::mask bt, jlong stride_con) { - // If the condition is inverted and we will be rolling - // through MININT to MAXINT, then bail out. - if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice! 
- // Odd stride - (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) || - // Count down loop rolls through MAXINT - ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) || - // Count up loop rolls through MININT - ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) { - return false; // Bail out - } - return true; + return NoOverflow; } Node* PhaseIdealLoop::loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head, @@ -647,10 +592,10 @@ void PhaseIdealLoop::add_parse_predicate(Deoptimization::DeoptReason reason, Nod // Find a safepoint node that dominates the back edge. We need a // SafePointNode so we can use its jvm state to create empty // predicates. -static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, MergeMemNode* mm, PhaseIdealLoop* phase) { +static bool no_side_effect_since_safepoint(Compile* C, const Node* head, const Node* mem, MergeMemNode* mm, const PhaseIdealLoop* phase) { SafePointNode* safepoint = nullptr; - for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { - Node* u = x->fast_out(i); + for (DUIterator_Fast imax, i = head->fast_outs(imax); i < imax; i++) { + Node* u = head->fast_out(i); if (u->is_memory_phi()) { Node* m = u->in(LoopNode::LoopBackControl); if (u->adr_type() == TypePtr::BOTTOM) { @@ -700,14 +645,14 @@ static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, Merge return true; } -SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop) { +SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, const Node* head, const IdealLoopTree* loop) { IfNode* exit_test = back_control->in(0)->as_If(); SafePointNode* safepoint = nullptr; if (exit_test->in(0)->is_SafePoint() && exit_test->in(0)->outcnt() == 1) { safepoint = exit_test->in(0)->as_SafePoint(); } else { Node* c = back_control; - while (c != x && c->Opcode() != Op_SafePoint) { + while (c != head && c->Opcode() != 
Op_SafePoint) { c = idom(c); } @@ -746,14 +691,14 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, Ideal } } #endif - if (!no_side_effect_since_safepoint(C, x, mem, mm, this)) { + if (!no_side_effect_since_safepoint(C, head, mem, mm, this)) { safepoint = nullptr; } else { assert(mm == nullptr|| _igvn.transform(mm) == mem->as_MergeMem()->base_memory(), "all memory state should have been processed"); } #ifdef ASSERT if (mm != nullptr) { - _igvn.remove_dead_node(mm); + _igvn.remove_dead_node(mm, PhaseIterGVN::NodeOrigin::Speculative); } #endif } @@ -952,11 +897,11 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { // Loop is strip mined: use the safepoint of the outer strip mined loop OuterStripMinedLoopNode* outer_loop = head->as_CountedLoop()->outer_loop(); assert(outer_loop != nullptr, "no outer loop"); - safepoint = outer_loop->outer_safepoint(); + safepoint = LoopPeeling == 0 ? nullptr : outer_loop->outer_safepoint(); outer_loop->transform_to_counted_loop(&_igvn, this); exit_test = head->loopexit(); } else { - safepoint = find_safepoint(back_control, x, loop); + safepoint = LoopPeeling == 0 ? nullptr : find_safepoint(back_control, x, loop); } IfFalseNode* exit_branch = exit_test->false_proj(); @@ -1130,8 +1075,8 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { // Peel one iteration of the loop and use the safepoint at the end // of the peeled iteration to insert Parse Predicates. If no well // positioned safepoint peel to guarantee a safepoint in the outer - // loop. - if (safepoint != nullptr || !loop->_has_call) { + // loop. When loop peeling is disabled, skip the peeling step altogether. 
+ if (LoopPeeling != 0 && (safepoint != nullptr || !loop->_has_call)) { old_new.clear(); do_peeling(loop, old_new); } else { @@ -1744,51 +1689,275 @@ LoopNode* PhaseIdealLoop::create_inner_head(IdealLoopTree* loop, BaseCountedLoop } #ifdef ASSERT -void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) { - Node* back_control = loop_exit_control(x, loop); +void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* head, BasicType bt) { + Node* back_control = loop_exit_control(loop); assert(back_control != nullptr, "no back control"); - BoolTest::mask mask = BoolTest::illegal; - float cl_prob = 0; - Node* incr = nullptr; - Node* limit = nullptr; + LoopExitTest exit_test(back_control, loop, this); + exit_test.build(); + assert(exit_test.is_valid_with_bt(bt), "no exit test"); - Node* cmp = loop_exit_test(back_control, loop, incr, limit, mask, cl_prob); - assert(cmp != nullptr && cmp->Opcode() == Op_Cmp(bt), "no exit test"); + LoopIVIncr iv_incr(head, loop); + iv_incr.build(exit_test.incr()); + assert(iv_incr.is_valid_with_bt(bt), "no incr"); - Node* phi_incr = nullptr; - incr = loop_iv_incr(incr, x, loop, phi_incr); - assert(incr != nullptr && incr->Opcode() == Op_Add(bt), "no incr"); + LoopIVStride stride = LoopIVStride(bt); + stride.build(iv_incr.incr()); + assert(stride.is_valid(), "no stride"); - Node* xphi = nullptr; - Node* stride = loop_iv_stride(incr, xphi); + PhiNode* phi = loop_iv_phi(stride.xphi(), iv_incr.phi_incr(), head); + assert(phi != nullptr && phi->in(LoopNode::LoopBackControl) == iv_incr.incr(), "No phi"); - assert(stride != nullptr, "no stride"); + assert(stride.compute_non_zero_stride_con(exit_test.mask(), bt) != 0, "illegal condition"); - PhiNode* phi = loop_iv_phi(xphi, phi_incr, x); - - assert(phi != nullptr && phi->in(LoopNode::LoopBackControl) == incr, "No phi"); - - jlong stride_con = stride->get_integer_as_long(bt); - - assert(condition_stride_ok(mask, stride_con), "illegal condition"); - - 
assert(mask != BoolTest::ne, "unexpected condition"); - assert(phi_incr == nullptr, "bad loop shape"); - assert(cmp->in(1) == incr, "bad exit test shape"); + assert(exit_test.mask() != BoolTest::ne, "unexpected condition"); + assert(iv_incr.phi_incr() == nullptr, "bad loop shape"); + assert(exit_test.cmp()->in(1) == iv_incr.incr(), "bad exit test shape"); // Safepoint on backedge not supported - assert(x->in(LoopNode::LoopBackControl)->Opcode() != Op_SafePoint, "no safepoint on backedge"); + assert(head->in(LoopNode::LoopBackControl)->Opcode() != Op_SafePoint, "no safepoint on backedge"); } #endif +void PhaseIdealLoop::LoopExitTest::build() { + _is_valid = false; + + const Node* iftrue = _back_control; + uint iftrue_op = iftrue->Opcode(); + Node* iff = iftrue->in(0); + BoolNode* test = iff->in(1)->as_Bool(); + _mask = test->_test._test; + _cl_prob = iff->as_If()->_prob; + if (iftrue_op == Op_IfFalse) { + _mask = BoolTest(_mask).negate(); + _cl_prob = 1.0f - _cl_prob; + } + // Get backedge compare + _cmp = test->in(1); + if (!_cmp->is_Cmp()) { + return; + } + + // Find the trip-counter increment & limit. Limit must be loop invariant. + _incr = _cmp->in(1); + _limit = _cmp->in(2); + + // --------- + // need 'loop()' test to tell if limit is loop invariant + // --------- + + if (_loop->is_invariant(_incr)) { // Swapped trip counter and limit? + swap(_incr, _limit); // Then reverse order into the CmpI + _mask = BoolTest(_mask).commute(); // And commute the exit test + } + + if (!_loop->is_invariant(_limit)) { // Limit must be loop-invariant + return; + } + if (_loop->is_invariant(_incr)) { // Trip counter must be loop-variant + return; + } + + _is_valid = true; +} + +// Canonicalize the loop condition if it is 'ne'. 
+void PhaseIdealLoop::LoopExitTest::canonicalize_mask(jlong stride_con) { + if (_mask != BoolTest::ne) { + return; + } + + assert(stride_con == 1 || stride_con == -1, "simple increment only - checked in CountedLoopConverter"); + + if (stride_con == 1) { + // 'ne' can be replaced with 'lt' only when init < limit. + // This is ensured by the inserted predicate in CountedLoopConverter + _mask = BoolTest::lt; + } else { + // 'ne' can be replaced with 'gt' only when init > limit. + // This is ensured by the inserted predicate in CountedLoopConverter. + _mask = BoolTest::gt; + } +} + +void PhaseIdealLoop::LoopIVIncr::build(Node* old_incr) { + _is_valid = false; + + Node* incr = old_incr; + // Trip-counter increment must be commutative & associative. + if (incr->is_Phi()) { + if (incr->as_Phi()->region() != _head || incr->req() != 3) { + return; // Not simple trip counter expression + } + Node* phi_incr = incr; + Node* back_control = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi + if (_loop->_phase->ctrl_is_member(_loop, back_control)) { // Trip counter must be loop-variant + _incr = back_control; + _phi_incr = phi_incr; + _is_valid = true; + return; + } + } + _incr = incr; + _phi_incr = nullptr; + + _is_valid = true; +} + +void PhaseIdealLoop::LoopIVStride::build(const Node* incr) { + _is_valid = false; + + assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp."); + // Get merge point + _xphi = incr->in(1); + _stride_node = incr->in(2); + if (!_stride_node->is_Con()) { // Oops, swap these + if (!_xphi->is_Con()) { // Is the other guy a constant? + return; // Nope, unknown stride, bail out + } + swap(_xphi, _stride_node); // 'incr' is commutative, so ok to swap + } + + // Iteratively uncast the loop induction variable + // until no more CastII/CastLL nodes are found. 
+ while (_xphi->Opcode() == Op_Cast(_iv_bt)) { + _xphi = _xphi->in(1); + } + + _is_valid = true; +} + +jlong PhaseIdealLoop::LoopIVStride::compute_non_zero_stride_con(const BoolTest::mask mask, const BasicType iv_bt) const { + jlong stride_con = stride_node()->get_integer_as_long(iv_bt); + assert(stride_con != 0, "missed some peephole opt"); // stride constant can never be 0! + + // If the condition is inverted and we will be rolling + // through MININT to MAXINT, then bail out. + if (mask == BoolTest::eq || // Bail out, but this loop trips at most twice! + // Odd stride + (mask == BoolTest::ne && stride_con != 1 && stride_con != -1) || + // Count down loop rolls through MAXINT + ((mask == BoolTest::le || mask == BoolTest::lt) && stride_con < 0) || + // Count up loop rolls through MININT + ((mask == BoolTest::ge || mask == BoolTest::gt) && stride_con > 0)) { + return 0; // Bail out with sentinel = 0 + } + + // Bail out if the stride is too big. + if (stride_con == min_signed_integer(iv_bt) || (ABS(stride_con) > max_signed_integer(iv_bt) / 2)) { + return 0; // Bail out with sentinel = 0 + } + + return stride_con; +} + +void CountedLoopConverter::LoopStructure::build() { + _is_valid = false; + + if (_back_control == nullptr) { + return; + } + + _exit_test.build(); + if (!_exit_test.is_valid_with_bt(_iv_bt)) { + return; // Avoid pointer & float & 64-bit compares + } + + Node* incr = _exit_test.incr(); + if (_exit_test.incr()->Opcode() == Op_Cast(_iv_bt)) { + incr = incr->in(1); + } + + _iv_incr.build(incr); + if (!_iv_incr.is_valid()) { + return; + } + + _truncated_increment.build(_iv_incr.incr()); + if (!_truncated_increment.is_valid()) { + return; // Funny increment opcode + } + assert(_truncated_increment.incr()->Opcode() == Op_Add(_iv_bt), "wrong increment code"); + + _stride.build(_truncated_increment.incr()); + if (!_stride.is_valid()) { + return; + } + + _phi = PhaseIdealLoop::loop_iv_phi(_stride.xphi(), _iv_incr.phi_incr(), _head); + if (_phi == nullptr || + 
(_truncated_increment.outer_trunc() == nullptr && _phi->in(LoopNode::LoopBackControl) != _truncated_increment.incr()) || + (_truncated_increment.outer_trunc() != nullptr && _phi->in(LoopNode::LoopBackControl) != _truncated_increment.outer_trunc())) { + return; + } + + Node* safepoint = _back_control->in(0)->in(0); + if (_loop->_child != nullptr) { + if (safepoint->Opcode() == Op_SafePoint) { + _safepoint = safepoint->as_SafePoint(); + } else { + _safepoint = nullptr; + } + } else { + _safepoint = _phase->find_safepoint(_back_control, _head, _loop); + } + + _is_valid = true; +} + +// We need to canonicalize the loop exit check by using different values for adjusted_limit: +// (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit. +// -> adjusted_limit = limit. +// (LE2) iv_post_i <= limit: +// iv_post_i < limit + 1 +// -> adjusted limit = limit + 1 +// (LE3) iv_pre_i < limit: +// iv_pre_i + stride < limit + stride +// iv_post_i < limit + stride +// -> adjusted_limit = limit + stride +// (LE4) iv_pre_i <= limit: +// iv_pre_i < limit + 1 +// iv_pre_i + stride < limit + stride + 1 +// iv_post_i < limit + stride + 1 +// -> adjusted_limit = limit + stride + 1 +// +// Note that: +// (AL) limit <= adjusted_limit. +jlong CountedLoopConverter::LoopStructure::final_limit_correction() const { + const jlong stride_con = _stride.compute_non_zero_stride_con(_exit_test.mask(), _iv_bt); + + // Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check. + const jlong limit_correction_for_pre_iv_exit_check = _iv_incr.phi_incr() != nullptr ? stride_con : 0; + + // Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check. + const jlong limit_correction_for_le_ge_exit_check = _exit_test.should_include_limit() + ? (stride_con > 0 ? 
1 : -1) + : 0; + + const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check; + const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1); + + return canonicalized_correction + limit_correction; // final_correction +} + #ifdef ASSERT -// convert an int counted loop to a long counted to stress handling of -// long counted loops -bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop) { +bool CountedLoopConverter::should_stress_long_counted_loop() { + assert(_checked_for_counted_loop, "must check for counted loop before stressing"); + + return StressLongCountedLoop > 0 && + _iv_bt == T_INT && + !_head->as_Loop()->is_loop_nest_inner_loop() && + _structure.truncated_increment().trunc_type() == TypeInt::INT; // Only stress an int loop (i.e., not char, byte or short) +} + +// Convert an int counted loop to a long counted to stress handling of long counted loops. Returns true upon success. +bool CountedLoopConverter::stress_long_counted_loop() { + assert(should_stress_long_counted_loop(), "stress condition not satisfied"); + + PhaseIterGVN* igvn = &_phase->igvn(); Unique_Node_List iv_nodes; Node_List old_new; - iv_nodes.push(cmp); + iv_nodes.push(_structure.exit_test().cmp()); bool failed = false; for (uint i = 0; i < iv_nodes.size() && !failed; i++) { @@ -1818,12 +1987,12 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l fatal("unexpected"); } - for (uint i = 1; i < n->req(); i++) { - Node* in = n->in(i); + for (uint j = 1; j < n->req(); j++) { + Node* in = n->in(j); if (in == nullptr) { continue; } - if (ctrl_is_member(loop, in)) { + if (_loop->is_member(_phase->get_loop(_phase->get_ctrl(in)))) { iv_nodes.push(in); } } @@ -1834,243 +2003,141 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l Node* n = iv_nodes.at(i); Node* clone = old_new[n->_idx]; if (clone != nullptr) { - _igvn.remove_dead_node(clone); 
+ igvn->remove_dead_node(clone, PhaseIterGVN::NodeOrigin::Speculative); } } return false; } + // Make sure we have loop limit checks in place to preserve overflows behaviour after casting to long. + if (_should_insert_stride_overflow_limit_check) { + insert_stride_overflow_limit_check(); + } + + if (_should_insert_init_trip_limit_check) { + insert_init_trip_limit_check(); + } + for (uint i = 0; i < iv_nodes.size(); i++) { Node* n = iv_nodes.at(i); Node* clone = old_new[n->_idx]; - for (uint i = 1; i < n->req(); i++) { - Node* in = n->in(i); + for (uint j = 1; j < n->req(); j++) { + Node* in = n->in(j); if (in == nullptr) { continue; } Node* in_clone = old_new[in->_idx]; if (in_clone == nullptr) { - assert(_igvn.type(in)->isa_int(), ""); + assert(igvn->type(in)->isa_int(), ""); in_clone = new ConvI2LNode(in); - _igvn.register_new_node_with_optimizer(in_clone); - set_subtree_ctrl(in_clone, false); + igvn->register_new_node_with_optimizer(in_clone); + _phase->set_subtree_ctrl(in_clone, false); } if (in_clone->in(0) == nullptr) { - in_clone->set_req(0, C->top()); - clone->set_req(i, in_clone); + in_clone->set_req(0, _phase->C->top()); + clone->set_req(j, in_clone); in_clone->set_req(0, nullptr); } else { - clone->set_req(i, in_clone); + clone->set_req(j, in_clone); } } - _igvn.register_new_node_with_optimizer(clone); + igvn->register_new_node_with_optimizer(clone); } - set_ctrl(old_new[phi->_idx], phi->in(0)); + _phase->set_ctrl(old_new[_structure.phi()->_idx], _structure.phi()->in(0)); for (uint i = 0; i < iv_nodes.size(); i++) { Node* n = iv_nodes.at(i); Node* clone = old_new[n->_idx]; - set_subtree_ctrl(clone, false); + _phase->set_subtree_ctrl(clone, false); Node* m = n->Opcode() == Op_CmpI ? 
clone : nullptr; - for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { - Node* u = n->fast_out(i); + for (DUIterator_Fast imax, j = n->fast_outs(imax); j < imax; j++) { + Node* u = n->fast_out(j); if (iv_nodes.member(u)) { continue; } if (m == nullptr) { m = new ConvL2INode(clone); - _igvn.register_new_node_with_optimizer(m); - set_subtree_ctrl(m, false); + igvn->register_new_node_with_optimizer(m); + _phase->set_subtree_ctrl(m, false); } - _igvn.rehash_node_delayed(u); - int nb = u->replace_edge(n, m, &_igvn); - --i, imax -= nb; + igvn->rehash_node_delayed(u); + int nb = u->replace_edge(n, m, igvn); + --j, imax -= nb; } } return true; } #endif -//------------------------------is_counted_loop-------------------------------- -bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv_bt) { - PhaseGVN *gvn = &_igvn; +bool PhaseIdealLoop::try_convert_to_counted_loop(Node* head, IdealLoopTree*& loop, const BasicType iv_bt) { + CountedLoopConverter converter(this, head, loop, iv_bt); + if (converter.is_counted_loop()) { +#ifdef ASSERT + // Stress by converting int counted loops to long counted loops + if (converter.should_stress_long_counted_loop() && converter.stress_long_counted_loop()) { + return false; + } +#endif - Node* back_control = loop_exit_control(x, loop); - if (back_control == nullptr) { + loop = converter.convert(); + return true; + } + + return false; +} + +bool CountedLoopConverter::is_counted_loop() { + PhaseIterGVN* igvn = &_phase->igvn(); + + _structure.build(); + if (!_structure.is_valid()) { return false; } - BoolTest::mask bt = BoolTest::illegal; - float cl_prob = 0; - Node* incr = nullptr; - Node* limit = nullptr; - Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob); - if (cmp == nullptr || cmp->Opcode() != Op_Cmp(iv_bt)) { - return false; // Avoid pointer & float & 64-bit compares - } + // ================================================= + // ---- Is the loop trip counted? 
---- - // Trip-counter increment must be commutative & associative. - if (incr->Opcode() == Op_Cast(iv_bt)) { - incr = incr->in(1); - } - - Node* phi_incr = nullptr; - incr = loop_iv_incr(incr, x, loop, phi_incr); - if (incr == nullptr) { + // Check trip counter will end up higher than the limit + if (_structure.is_infinite_loop()) { return false; } - Node* trunc1 = nullptr; - Node* trunc2 = nullptr; - const TypeInteger* iv_trunc_t = nullptr; - Node* orig_incr = incr; - if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t, iv_bt))) { - return false; // Funny increment opcode - } - assert(incr->Opcode() == Op_Add(iv_bt), "wrong increment code"); - - Node* xphi = nullptr; - Node* stride = loop_iv_stride(incr, xphi); - - if (stride == nullptr) { - return false; - } - - // Iteratively uncast the loop induction variable - // until no more CastII/CastLL nodes are found. - while (xphi->Opcode() == Op_Cast(iv_bt)) { - xphi = xphi->in(1); - } - // Stride must be constant - jlong stride_con = stride->get_integer_as_long(iv_bt); - assert(stride_con != 0, "missed some peephole opt"); - - PhiNode* phi = loop_iv_phi(xphi, phi_incr, x); - - if (phi == nullptr || - (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) || - (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) { + const jlong stride_con = _structure.stride_con(); + if (stride_con == 0) { return false; } - Node* iftrue = back_control; - uint iftrue_op = iftrue->Opcode(); - Node* iff = iftrue->in(0); - BoolNode* test = iff->in(1)->as_Bool(); - - const TypeInteger* limit_t = gvn->type(limit)->is_integer(iv_bt); - if (trunc1 != nullptr) { - // When there is a truncation, we must be sure that after the truncation - // the trip counter will end up higher than the limit, otherwise we are looking - // at an endless loop. Can happen with range checks. 
- - // Example: - // int i = 0; - // while (true) - // sum + = array[i]; - // i++; - // i = i && 0x7fff; - // } - // - // If the array is shorter than 0x8000 this exits through a AIOOB - // - Counted loop transformation is ok - // If the array is longer then this is an endless loop - // - No transformation can be done. - - const TypeInteger* incr_t = gvn->type(orig_incr)->is_integer(iv_bt); - if (limit_t->hi_as_long() > incr_t->hi_as_long()) { - // if the limit can have a higher value than the increment (before the phi) - return false; - } - } - - Node *init_trip = phi->in(LoopNode::EntryControl); - - // If iv trunc type is smaller than int, check for possible wrap. - if (!TypeInteger::bottom(iv_bt)->higher_equal(iv_trunc_t)) { - assert(trunc1 != nullptr, "must have found some truncation"); - - // Get a better type for the phi (filtered thru if's) - const TypeInteger* phi_ft = filtered_type(phi); - - // Can iv take on a value that will wrap? - // - // Ensure iv's limit is not within "stride" of the wrap value. - // - // Example for "short" type - // Truncation ensures value is in the range -32768..32767 (iv_trunc_t) - // If the stride is +10, then the last value of the induction - // variable before the increment (phi_ft->_hi) must be - // <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to - // ensure no truncation occurs after the increment. 
- - if (stride_con > 0) { - if (iv_trunc_t->hi_as_long() - phi_ft->hi_as_long() < stride_con || - iv_trunc_t->lo_as_long() > phi_ft->lo_as_long()) { - return false; // truncation may occur - } - } else if (stride_con < 0) { - if (iv_trunc_t->lo_as_long() - phi_ft->lo_as_long() > stride_con || - iv_trunc_t->hi_as_long() < phi_ft->hi_as_long()) { - return false; // truncation may occur - } - } - // No possibility of wrap so truncation can be discarded - // Promote iv type to Int - } else { - assert(trunc1 == nullptr && trunc2 == nullptr, "no truncation for int"); - } - - if (!condition_stride_ok(bt, stride_con)) { + // Check iv type can be promoted to int for short/char/byte loops + if (has_truncation_wrap(_structure.truncated_increment(), _structure.phi(), stride_con)) { return false; } - const TypeInteger* init_t = gvn->type(init_trip)->is_integer(iv_bt); - - if (stride_con > 0) { - if (init_t->lo_as_long() > max_signed_integer(iv_bt) - stride_con) { - return false; // cyclic loop - } - } else { - if (init_t->hi_as_long() < min_signed_integer(iv_bt) - stride_con) { - return false; // cyclic loop - } - } - - if (phi_incr != nullptr && bt != BoolTest::ne) { - // check if there is a possibility of IV overflowing after the first increment - if (stride_con > 0) { - if (init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) { - return false; - } - } else { - if (init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con) { - return false; - } - } + // Check iv is not overflowing + Node* init_trip = _structure.phi()->in(LoopNode::EntryControl); + const TypeInteger* init_t = igvn->type(init_trip)->is_integer(_iv_bt); + if (is_iv_overflowing(init_t, stride_con, _structure.iv_incr().phi_incr(), _structure.exit_test().mask())) { + return false; } // ================================================= // ---- SUCCESS! Found A Trip-Counted Loop! ----- - // - if (x->Opcode() == Op_Region) { - // x has not yet been transformed to Loop or LongCountedLoop. 
+ if (_head->Opcode() == Op_Region) { + // head has not yet been transformed to Loop or LongCountedLoop. // This should only happen if we are inside an infinite loop. // It happens like this: // build_loop_tree -> do not attach infinite loop and nested loops // beautify_loops -> does not transform the infinite and nested loops to LoopNode, because not attached yet // build_loop_tree -> find and attach infinite and nested loops // counted_loop -> nested Regions are not yet transformed to LoopNodes, we land here - assert(x->as_Region()->is_in_infinite_subgraph(), - "x can only be a Region and not Loop if inside infinite loop"); + assert(_head->as_Region()->is_in_infinite_subgraph(), + "head can only be a Region and not Loop if inside infinite loop"); // Come back later when Region is transformed to LoopNode return false; } - assert(x->Opcode() == Op_Loop || x->Opcode() == Op_LongCountedLoop, "regular loops only"); - C->print_method(PHASE_BEFORE_CLOOPS, 3, x); + assert(_head->Opcode() == Op_Loop || _head->Opcode() == Op_LongCountedLoop, "regular loops only"); // =================================================== // We can only convert this loop to a counted loop if we can guarantee that the iv phi will never overflow at runtime. @@ -2123,23 +2190,10 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv // When converting a loop to a counted loop, we want to have a canonicalized loop exit check of the form: // iv_post_i < adjusted_limit // - // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit: - // (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit. - // -> adjusted_limit = limit. 
- // (LE2) iv_post_i <= limit: - // iv_post_i < limit + 1 - // -> adjusted limit = limit + 1 - // (LE3) iv_pre_i < limit: - // iv_pre_i + stride < limit + stride - // iv_post_i < limit + stride - // -> adjusted_limit = limit + stride - // (LE4) iv_pre_i <= limit: - // iv_pre_i < limit + 1 - // iv_pre_i + stride < limit + stride + 1 - // iv_post_i < limit + stride + 1 - // -> adjusted_limit = limit + stride + 1 + // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit + // (see LoopStructure::final_limit_correction()). // - // Note that: + // Note that after canonicalization: // (AL) limit <= adjusted_limit. // // The following loop invariant has to hold for counted loops with n iterations (i.e. loop exit check true after n-th @@ -2259,78 +2313,63 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv // there is no overflow of the iv phi after the first iteration. In this case, we don't need to check (ii) // again and can skip the predicate. - // Check (vi) and bail out if the stride is too big. - if (stride_con == min_signed_integer(iv_bt) || (ABS(stride_con) > max_signed_integer(iv_bt) / 2)) { - return false; + const TypeInteger* limit_t = igvn->type(_structure.limit())->is_integer(_iv_bt); + StrideOverflowState stride_overflow_state = check_stride_overflow(_structure.final_limit_correction(), limit_t, _iv_bt); + + Node* init_control = _head->in(LoopNode::EntryControl); + const Predicates predicates(init_control); + const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block(); + + if (stride_overflow_state == Overflow) { + return false; // Bailout: integer overflow is certain. } - // Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check. - const jlong limit_correction_for_pre_iv_exit_check = (phi_incr != nullptr) ? 
stride_con : 0; - - // Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check. - const bool includes_limit = (bt == BoolTest::le || bt == BoolTest::ge); - const jlong limit_correction_for_le_ge_exit_check = (includes_limit ? (stride_con > 0 ? 1 : -1) : 0); - - const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check; - const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1); - const jlong final_correction = canonicalized_correction + limit_correction; - - int sov = check_stride_overflow(final_correction, limit_t, iv_bt); - Node* init_control = x->in(LoopNode::EntryControl); - - // If sov==0, limit's type always satisfies the condition, for + // If stride_overflow_state == NO_OVERFLOW, limit's type always satisfies the condition, for // example, when it is an array length. - if (sov != 0) { - if (sov < 0) { - return false; // Bailout: integer overflow is certain. - } + + _should_insert_stride_overflow_limit_check = false; + if (stride_overflow_state == RequireLimitCheck) { // (1) Loop Limit Check Predicate is required because we could not statically prove that // limit + final_correction = adjusted_limit - 1 + stride <= max_int - assert(!x->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed"); - const Predicates predicates(init_control); - const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block(); + assert(!_head->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed"); if (!loop_limit_check_predicate_block->has_parse_predicate()) { // The Loop Limit Check Parse Predicate is not generated if this method trapped here before. 
#ifdef ASSERT if (TraceLoopLimitCheck) { tty->print("Missing Loop Limit Check Parse Predicate:"); - loop->dump_head(); - x->dump(1); + _loop->dump_head(); + _head->dump(1); } #endif return false; } ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate(); - if (!is_dominator(get_ctrl(limit), loop_limit_check_parse_predicate->in(0))) { + if (!_phase->is_dominator(_phase->get_ctrl(_structure.limit()), loop_limit_check_parse_predicate->in(0))) { return false; } - Node* cmp_limit; - Node* bol; - - if (stride_con > 0) { - cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt); - bol = new BoolNode(cmp_limit, BoolTest::le); - } else { - cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt); - bol = new BoolNode(cmp_limit, BoolTest::ge); - } - - insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol); + _should_insert_stride_overflow_limit_check = true; } // (2.3) const bool init_plus_stride_could_overflow = - (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) || - (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con); - // (2.1) - const bool init_gte_limit = (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) || - (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long()); + (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(_iv_bt) - stride_con) || + (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(_iv_bt) - stride_con); + // (2.1) + const bool init_gte_limit = + (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) || + (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long()); + + _should_insert_init_trip_limit_check = false; if (init_gte_limit && // (2.1) - ((bt == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3) - 
!has_dominating_loop_limit_check(init_trip, limit, stride_con, iv_bt, init_control))) { // (2.2) + ((_structure.exit_test().mask() == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3) + !has_dominating_loop_limit_check(init_trip, + _structure.limit(), + stride_con, + _iv_bt, + init_control))) { // (2.2) // (2) Iteration Loop Limit Check Predicate is required because neither (2.1), (2.2), nor (2.3) holds. // We use the following condition: // - stride > 0: init < limit @@ -2340,15 +2379,13 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv // a requirement). We transform the loop exit check by using a less-than-operator. By doing so, we must always // check that init < limit. Otherwise, we could have a different number of iterations at runtime. - const Predicates predicates(init_control); - const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block(); if (!loop_limit_check_predicate_block->has_parse_predicate()) { // The Loop Limit Check Parse Predicate is not generated if this method trapped here before. 
#ifdef ASSERT if (TraceLoopLimitCheck) { tty->print("Missing Loop Limit Check Parse Predicate:"); - loop->dump_head(); - x->dump(1); + _loop->dump_head(); + _head->dump(1); } #endif return false; @@ -2356,81 +2393,196 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate(); Node* parse_predicate_entry = loop_limit_check_parse_predicate->in(0); - if (!is_dominator(get_ctrl(limit), parse_predicate_entry) || - !is_dominator(get_ctrl(init_trip), parse_predicate_entry)) { + if (!_phase->is_dominator(_phase->get_ctrl(_structure.limit()), parse_predicate_entry) || + !_phase->is_dominator(_phase->get_ctrl(init_trip), parse_predicate_entry)) { return false; } - Node* cmp_limit; - Node* bol; - - if (stride_con > 0) { - cmp_limit = CmpNode::make(init_trip, limit, iv_bt); - bol = new BoolNode(cmp_limit, BoolTest::lt); - } else { - cmp_limit = CmpNode::make(init_trip, limit, iv_bt); - bol = new BoolNode(cmp_limit, BoolTest::gt); - } - - insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol); + _should_insert_init_trip_limit_check = true; } - if (bt == BoolTest::ne) { - // Now we need to canonicalize the loop condition if it is 'ne'. - assert(stride_con == 1 || stride_con == -1, "simple increment only - checked before"); - if (stride_con > 0) { - // 'ne' can be replaced with 'lt' only when init < limit. This is ensured by the inserted predicate above. - bt = BoolTest::lt; - } else { - assert(stride_con < 0, "must be"); - // 'ne' can be replaced with 'gt' only when init > limit. This is ensured by the inserted predicate above. 
- bt = BoolTest::gt; - } + _structure.exit_test().canonicalize_mask(stride_con); + + if (is_safepoint_invalid(_structure.sfpt())) { + return false; } - Node* sfpt = nullptr; - if (loop->_child == nullptr) { - sfpt = find_safepoint(back_control, x, loop); +#ifdef ASSERT + _checked_for_counted_loop = true; +#endif + +#ifndef PRODUCT + if (StressCountedLoop && (_phase->C->random() % 2 == 0)) { + return false; + } +#endif + + return true; +} + +bool CountedLoopConverter::is_iv_overflowing(const TypeInteger* init_t, jlong stride_con, Node* phi_increment, + BoolTest::mask mask) const { + if (stride_con > 0) { + if (init_t->lo_as_long() > max_signed_integer(_iv_bt) - stride_con) { + return true; // cyclic loop + } } else { - sfpt = iff->in(0); - if (sfpt->Opcode() != Op_SafePoint) { - sfpt = nullptr; + if (init_t->hi_as_long() < min_signed_integer(_iv_bt) - stride_con) { + return true; // cyclic loop } } - if (x->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint) { - Node* backedge_sfpt = x->in(LoopNode::LoopBackControl); - if (((iv_bt == T_INT && LoopStripMiningIter != 0) || - iv_bt == T_LONG) && + if (phi_increment != nullptr && mask != BoolTest::ne) { + // check if there is a possibility of IV overflowing after the first increment + if (stride_con > 0) { + if (init_t->hi_as_long() > max_signed_integer(_iv_bt) - stride_con) { + return true; + } + } else { + if (init_t->lo_as_long() < min_signed_integer(_iv_bt) - stride_con) { + return true; + } + } + } + + return false; +} + +bool CountedLoopConverter::LoopStructure::is_infinite_loop() const { + PhaseIterGVN& igvn = _phase->igvn(); + const TypeInteger* limit_t = igvn.type(limit())->is_integer(_iv_bt); + + if (_truncated_increment.outer_trunc() != nullptr) { + // When there is a truncation, we must be sure that after the truncation + // the trip counter will end up higher than the limit, otherwise we are looking + // at an endless loop. Can happen with range checks. 
+ + // Example: + // int i = 0; + // while (true) { + // sum + = array[i]; + // i++; + // i = i && 0x7fff; + // } + // + // If the array is shorter than 0x8000 this exits through an AIOOB + // - Counted loop transformation is ok + // If the array is longer then this is an endless loop + // - No transformation can be done. + + const TypeInteger* incr_t = igvn.type(_iv_incr.incr())->is_integer(_iv_bt); + if (limit_t->hi_as_long() > incr_t->hi_as_long()) { + // if the limit can have a higher value than the increment (before the phi) + return true; + } + } + + return false; +} + +bool CountedLoopConverter::has_truncation_wrap(const TruncatedIncrement& truncation, Node* phi, jlong stride_con) { + // If iv trunc type is smaller than int (i.e., short/char/byte), check for possible wrap. + if (!TypeInteger::bottom(_iv_bt)->higher_equal(truncation.trunc_type())) { + assert(truncation.outer_trunc() != nullptr, "must have found some truncation"); + + // Get a better type for the phi (filtered thru if's) + const TypeInteger* phi_ft = filtered_type(phi); + + // Can iv take on a value that will wrap? + // + // Ensure iv's limit is not within "stride" of the wrap value. + // + // Example for "short" type + // Truncation ensures value is in the range -32768..32767 (iv_trunc_t) + // If the stride is +10, then the last value of the induction + // variable before the increment (phi_ft->_hi) must be + // <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to + // ensure no truncation occurs after the increment. 
+ + if (stride_con > 0) { + if (truncation.trunc_type()->hi_as_long() - phi_ft->hi_as_long() < stride_con || + truncation.trunc_type()->lo_as_long() > phi_ft->lo_as_long()) { + return true; // truncation may occur + } + } else if (stride_con < 0) { + if (truncation.trunc_type()->lo_as_long() - phi_ft->lo_as_long() > stride_con || + truncation.trunc_type()->hi_as_long() < phi_ft->hi_as_long()) { + return true; // truncation may occur + } + } + + // No possibility of wrap so truncation can be discarded + // Promote iv type to Int + } else { + assert(Type::equals(truncation.trunc_type(), TypeInt::INT) || Type::equals(truncation.trunc_type(), TypeLong::LONG), + "unexpected truncation type"); + assert(truncation.outer_trunc() == nullptr && truncation.inner_trunc() == nullptr, "no truncation for int"); + } + + return false; +} + +SafePointNode* CountedLoopConverter::find_safepoint(Node* iftrue) { + Node* iff = iftrue->in(0); + + if (_loop->_child == nullptr) { + return _phase->find_safepoint(iftrue, _head, _loop); + } + + Node* sfpt = iff->in(0); + if (sfpt->Opcode() == Op_SafePoint) { + return sfpt->as_SafePoint(); + } + return nullptr; +} + +bool CountedLoopConverter::is_safepoint_invalid(SafePointNode* sfpt) const { + if (_head->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint) { + if (((_iv_bt == T_INT && LoopStripMiningIter != 0) || + _iv_bt == T_LONG) && sfpt == nullptr) { // Leaving the safepoint on the backedge and creating a // CountedLoop will confuse optimizations. We can't move the // safepoint around because its jvm state wouldn't match a new // location. Give up on that loop. 
- return false; - } - if (is_deleteable_safept(backedge_sfpt)) { - replace_node_and_forward_ctrl(backedge_sfpt, iftrue); - if (loop->_safepts != nullptr) { - loop->_safepts->yank(backedge_sfpt); - } - loop->_tail = iftrue; + return true; } } + return false; +} +IdealLoopTree* CountedLoopConverter::convert() { #ifdef ASSERT - if (iv_bt == T_INT && - !x->as_Loop()->is_loop_nest_inner_loop() && - StressLongCountedLoop > 0 && - trunc1 == nullptr && - convert_to_long_loop(cmp, phi, loop)) { - return false; - } + assert(_checked_for_counted_loop, "must check for counted loop before conversion"); #endif - Node* adjusted_limit = limit; - if (phi_incr != nullptr) { + PhaseIterGVN* igvn = &_phase->igvn(); + + _phase->C->print_method(PHASE_BEFORE_CLOOPS, 3, _head); + + if (_should_insert_stride_overflow_limit_check) { + insert_stride_overflow_limit_check(); + } + + if (_should_insert_init_trip_limit_check) { + insert_init_trip_limit_check(); + } + + Node* back_control = _phase->loop_exit_control(_loop); + if (_head->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint) { + Node* backedge_sfpt = _head->in(LoopNode::LoopBackControl); + if (_phase->is_deleteable_safept(backedge_sfpt)) { + _phase->replace_node_and_forward_ctrl(backedge_sfpt, back_control); + if (_loop->_safepts != nullptr) { + _loop->_safepts->yank(backedge_sfpt); + } + _loop->_tail = back_control; + } + } + + Node* adjusted_limit = _structure.limit(); + if (_structure.iv_incr().phi_incr() != nullptr) { // If compare points directly to the phi we need to adjust // the compare so that it points to the incr. 
Limit have // to be adjusted to keep trip count the same and we @@ -2440,128 +2592,147 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv // is converted to // i = init; do {} while(++i < limit+1); // - adjusted_limit = gvn->transform(AddNode::make(limit, stride, iv_bt)); + adjusted_limit = igvn->transform(AddNode::make(_structure.limit(), _structure.stride().stride_node(), _iv_bt)); } - if (includes_limit) { + BoolTest::mask mask = _structure.exit_test().mask(); + if (_structure.exit_test().should_include_limit()) { // The limit check guaranties that 'limit <= (max_jint - stride)' so // we can convert 'i <= limit' to 'i < limit+1' since stride != 0. - // - Node* one = (stride_con > 0) ? gvn->integercon( 1, iv_bt) : gvn->integercon(-1, iv_bt); - adjusted_limit = gvn->transform(AddNode::make(adjusted_limit, one, iv_bt)); - if (bt == BoolTest::le) - bt = BoolTest::lt; - else if (bt == BoolTest::ge) - bt = BoolTest::gt; - else + Node* one = (_structure.stride_con() > 0) ? igvn->integercon(1, _iv_bt) : igvn->integercon(-1, _iv_bt); + adjusted_limit = igvn->transform(AddNode::make(adjusted_limit, one, _iv_bt)); + if (mask == BoolTest::le) { + mask = BoolTest::lt; + } else if (mask == BoolTest::ge) { + mask = BoolTest::gt; + } else { ShouldNotReachHere(); + } } - set_subtree_ctrl(adjusted_limit, false); + _phase->set_subtree_ctrl(adjusted_limit, false); // Build a canonical trip test. // Clone code, as old values may be in use. 
- incr = incr->clone(); - incr->set_req(1,phi); - incr->set_req(2,stride); - incr = _igvn.register_new_node_with_optimizer(incr); - set_early_ctrl(incr, false); - _igvn.rehash_node_delayed(phi); - phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn ); + Node* incr = _structure.truncated_increment().incr()->clone(); + incr->set_req(1, _structure.phi()); + incr->set_req(2, _structure.stride().stride_node()); + incr = igvn->register_new_node_with_optimizer(incr); + _phase->set_early_ctrl(incr, false); + igvn->rehash_node_delayed(_structure.phi()); + _structure.phi()->set_req_X(LoopNode::LoopBackControl, incr, igvn); // If phi type is more restrictive than Int, raise to // Int to prevent (almost) infinite recursion in igvn // which can only handle integer types for constants or minint..maxint. - if (!TypeInteger::bottom(iv_bt)->higher_equal(phi->bottom_type())) { - Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInteger::bottom(iv_bt)); + Node* phi = _structure.phi(); + if (!TypeInteger::bottom(_iv_bt)->higher_equal(phi->bottom_type())) { + Node* nphi = + PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInteger::bottom(_iv_bt)); nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl)); - nphi = _igvn.register_new_node_with_optimizer(nphi); - set_ctrl(nphi, get_ctrl(phi)); - _igvn.replace_node(phi, nphi); + nphi = igvn->register_new_node_with_optimizer(nphi); + _phase->set_ctrl(nphi, _phase->get_ctrl(phi)); + igvn->replace_node(phi, nphi); phi = nphi->as_Phi(); } - cmp = cmp->clone(); - cmp->set_req(1,incr); - cmp->set_req(2, adjusted_limit); - cmp = _igvn.register_new_node_with_optimizer(cmp); - set_ctrl(cmp, iff->in(0)); - test = test->clone()->as_Bool(); - (*(BoolTest*)&test->_test)._test = bt; - test->set_req(1,cmp); - _igvn.register_new_node_with_optimizer(test); - set_ctrl(test, iff->in(0)); + Node* iftrue = back_control; + const uint iftrue_op = iftrue->Opcode(); + Node* iff = iftrue->in(0); + + // 
Replace the old CmpNode with new adjusted_limit + Node* new_cmp = _structure.exit_test().cmp()->clone(); + new_cmp->set_req(1, incr); + new_cmp->set_req(2, adjusted_limit); + new_cmp = igvn->register_new_node_with_optimizer(new_cmp); + _phase->set_ctrl(new_cmp, iff->in(0)); + + // Replace the old BoolNode with new CmpNode + BoolNode* new_test = iff->in(1)->clone()->as_Bool(); + const_cast(&new_test->_test)->_test = mask; // Yes, it's a const, but it's a newly cloned node so we should be fine. + new_test->set_req(1, new_cmp); + igvn->register_new_node_with_optimizer(new_test); + _phase->set_ctrl(new_test, iff->in(0)); // Replace the old IfNode with a new LoopEndNode - Node *lex = _igvn.register_new_node_with_optimizer(BaseCountedLoopEndNode::make(iff->in(0), test, cl_prob, iff->as_If()->_fcnt, iv_bt)); - IfNode *le = lex->as_If(); - uint dd = dom_depth(iff); - set_idom(le, le->in(0), dd); // Update dominance for loop exit - set_loop(le, loop); + Node* loop_end = igvn->register_new_node_with_optimizer(BaseCountedLoopEndNode::make(iff->in(0), + new_test, + _structure.exit_test().cl_prob(), + iff->as_If()->_fcnt, + _iv_bt)); + IfNode* loop_end_exit = loop_end->as_If(); + const uint dd = _phase->dom_depth(iff); + _phase->set_idom(loop_end_exit, loop_end_exit->in(0), dd); // Update dominance for loop exit + _phase->set_loop(loop_end_exit, _loop); // Get the loop-exit control - Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue)); + Node* iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue)); // Need to swap loop-exit and loop-back control? 
if (iftrue_op == Op_IfFalse) { - Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le)); - Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le)); + Node* ift2 = igvn->register_new_node_with_optimizer(new IfTrueNode(loop_end_exit)); + Node* iff2 = igvn->register_new_node_with_optimizer(new IfFalseNode(loop_end_exit)); - loop->_tail = back_control = ift2; - set_loop(ift2, loop); - set_loop(iff2, get_loop(iffalse)); + _loop->_tail = back_control = ift2; + _phase->set_loop(ift2, _loop); + _phase->set_loop(iff2, _phase->get_loop(iffalse)); // Lazy update of 'get_ctrl' mechanism. - replace_node_and_forward_ctrl(iffalse, iff2); - replace_node_and_forward_ctrl(iftrue, ift2); + _phase->replace_node_and_forward_ctrl(iffalse, iff2); + _phase->replace_node_and_forward_ctrl(iftrue, ift2); // Swap names iffalse = iff2; - iftrue = ift2; + iftrue = ift2; } else { - _igvn.rehash_node_delayed(iffalse); - _igvn.rehash_node_delayed(iftrue); - iffalse->set_req_X( 0, le, &_igvn ); - iftrue ->set_req_X( 0, le, &_igvn ); + igvn->rehash_node_delayed(iffalse); + igvn->rehash_node_delayed(iftrue); + iffalse->set_req_X(0, loop_end_exit, igvn); + iftrue->set_req_X(0, loop_end_exit, igvn); } - set_idom(iftrue, le, dd+1); - set_idom(iffalse, le, dd+1); + _phase->set_idom(iftrue, loop_end_exit, dd + 1); + _phase->set_idom(iffalse, loop_end_exit, dd + 1); assert(iff->outcnt() == 0, "should be dead now"); - replace_node_and_forward_ctrl(iff, le); // fix 'get_ctrl' + _phase->replace_node_and_forward_ctrl(iff, loop_end_exit); // fix 'get_ctrl' + Node* init_control = _head->in(LoopNode::EntryControl); Node* entry_control = init_control; - bool strip_mine_loop = iv_bt == T_INT && - loop->_child == nullptr && - sfpt != nullptr && - !loop->_has_call && - is_deleteable_safept(sfpt); + bool strip_mine_loop = _iv_bt == T_INT && + _loop->_child == nullptr && + _structure.sfpt() != nullptr && + !_loop->_has_call && + _phase->is_deleteable_safept(_structure.sfpt()); 
IdealLoopTree* outer_ilt = nullptr; if (strip_mine_loop) { - outer_ilt = create_outer_strip_mined_loop(init_control, loop, cl_prob, le->_fcnt, - entry_control, iffalse); + outer_ilt = _phase->create_outer_strip_mined_loop(init_control, + _loop, + _structure.exit_test().cl_prob(), + loop_end_exit->_fcnt, + entry_control, + iffalse); } // Now setup a new CountedLoopNode to replace the existing LoopNode - BaseCountedLoopNode *l = BaseCountedLoopNode::make(entry_control, back_control, iv_bt); - l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve + BaseCountedLoopNode* l = BaseCountedLoopNode::make(entry_control, back_control, _iv_bt); + l->set_unswitch_count(_head->as_Loop()->unswitch_count()); // Preserve // The following assert is approximately true, and defines the intention // of can_be_counted_loop. It fails, however, because phase->type // is not yet initialized for this loop and its parts. //assert(l->can_be_counted_loop(this), "sanity"); - _igvn.register_new_node_with_optimizer(l); - set_loop(l, loop); - loop->_head = l; + igvn->register_new_node_with_optimizer(l); + _phase->set_loop(l, _loop); + _loop->_head = l; // Fix all data nodes placed at the old loop head. // Uses the lazy-update mechanism of 'get_ctrl'. 
- replace_node_and_forward_ctrl(x, l); - set_idom(l, entry_control, dom_depth(entry_control) + 1); + _phase->replace_node_and_forward_ctrl(_head, l); + _phase->set_idom(l, entry_control, _phase->dom_depth(entry_control) + 1); - if (iv_bt == T_INT && (LoopStripMiningIter == 0 || strip_mine_loop)) { + if (_iv_bt == T_INT && (LoopStripMiningIter == 0 || strip_mine_loop)) { // Check for immediately preceding SafePoint and remove - if (sfpt != nullptr && (strip_mine_loop || is_deleteable_safept(sfpt))) { + if (_structure.sfpt() != nullptr && (strip_mine_loop || _phase->is_deleteable_safept(_structure.sfpt()))) { if (strip_mine_loop) { Node* outer_le = outer_ilt->_tail->in(0); - Node* sfpt_clone = sfpt->clone(); + Node* sfpt_clone = _structure.sfpt()->clone(); sfpt_clone->set_req(0, iffalse); outer_le->set_req(0, sfpt_clone); @@ -2570,40 +2741,42 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv // Polling load should be pinned outside inner loop. Node* new_polladdr = polladdr->clone(); new_polladdr->set_req(0, iffalse); - _igvn.register_new_node_with_optimizer(new_polladdr, polladdr); - set_ctrl(new_polladdr, iffalse); + igvn->register_new_node_with_optimizer(new_polladdr, polladdr); + _phase->set_ctrl(new_polladdr, iffalse); sfpt_clone->set_req(TypeFunc::Parms, new_polladdr); } // When this code runs, loop bodies have not yet been populated. 
const bool body_populated = false; - register_control(sfpt_clone, outer_ilt, iffalse, body_populated); - set_idom(outer_le, sfpt_clone, dom_depth(sfpt_clone)); + _phase->register_control(sfpt_clone, outer_ilt, iffalse, body_populated); + _phase->set_idom(outer_le, sfpt_clone, _phase->dom_depth(sfpt_clone)); } - replace_node_and_forward_ctrl(sfpt, sfpt->in(TypeFunc::Control)); - if (loop->_safepts != nullptr) { - loop->_safepts->yank(sfpt); + _phase->replace_node_and_forward_ctrl(_structure.sfpt(), _structure.sfpt()->in(TypeFunc::Control)); + if (_loop->_safepts != nullptr) { + _loop->_safepts->yank(_structure.sfpt()); } } } #ifdef ASSERT - assert(l->is_valid_counted_loop(iv_bt), "counted loop shape is messed up"); - assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" ); + assert(l->is_valid_counted_loop(_iv_bt), "counted loop shape is messed up"); + assert(l == _loop->_head && l->phi() == phi && l->loopexit_or_null() == loop_end, "" ); #endif + #ifndef PRODUCT if (TraceLoopOpts) { tty->print("Counted "); - loop->dump_head(); + _loop->dump_head(); } #endif - C->print_method(PHASE_AFTER_CLOOPS, 3, l); - // Capture bounds of the loop in the induction variable Phi before // subsequent transformation (iteration splitting) obscures the // bounds - l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn)); + l->phi()->as_Phi()->set_type(l->phi()->Value(igvn)); + _phase->C->print_method(PHASE_AFTER_CLOOPS, 3, l); + + IdealLoopTree* loop = _loop; if (strip_mine_loop) { l->mark_strip_mined(); l->verify_strip_mined(1); @@ -2612,21 +2785,24 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv } #ifndef PRODUCT - if (x->as_Loop()->is_loop_nest_inner_loop() && iv_bt == T_LONG) { + if (_head->as_Loop()->is_loop_nest_inner_loop() && _iv_bt == T_LONG) { AtomicAccess::inc(&_long_loop_counted_loops); } #endif - if (iv_bt == T_LONG && x->as_Loop()->is_loop_nest_outer_loop()) { + + if (_iv_bt == T_LONG && 
_head->as_Loop()->is_loop_nest_outer_loop()) { l->mark_loop_nest_outer_loop(); } - return true; + return loop; } // Check if there is a dominating loop limit check of the form 'init < limit' starting at the loop entry. // If there is one, then we do not need to create an additional Loop Limit Check Predicate. -bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con, - const BasicType iv_bt, Node* loop_entry) { +bool CountedLoopConverter::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con, + const BasicType iv_bt, Node* loop_entry) const { + PhaseIterGVN& _igvn = _phase->igvn(); + // Eagerly call transform() on the Cmp and Bool node to common them up if possible. This is required in order to // successfully find a dominated test with the If node below. Node* cmp_limit; @@ -2649,8 +2825,8 @@ bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limi const bool found_dominating_test = dominated_iff != nullptr && dominated_iff->is_ConI(); // Kill the If with its projections again in the next IGVN round by cutting it off from the graph. - _igvn.replace_input_of(iff, 0, C->top()); - _igvn.replace_input_of(iff, 1, C->top()); + _igvn.replace_input_of(iff, 0, _phase->C->top()); + _igvn.replace_input_of(iff, 1, _phase->C->top()); return found_dominating_test; } @@ -2953,24 +3129,23 @@ Node* LoopLimitNode::Identity(PhaseGVN* phase) { return this; } -//============================================================================= -//----------------------match_incr_with_optional_truncation-------------------- // Match increment with optional truncation: // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16 -// Return null for failure. Success returns the increment node. 
-Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, - const TypeInteger** trunc_type, - BasicType bt) { - // Quick cutouts: - if (expr == nullptr || expr->req() != 3) return nullptr; +void CountedLoopConverter::TruncatedIncrement::build(Node* expr) { + _is_valid = false; - Node *t1 = nullptr; - Node *t2 = nullptr; + // Quick cutouts: + if (expr == nullptr || expr->req() != 3) { + return; + } + + Node* t1 = nullptr; + Node* t2 = nullptr; Node* n1 = expr; int n1op = n1->Opcode(); - const TypeInteger* trunc_t = TypeInteger::bottom(bt); + const TypeInteger* trunc_t = TypeInteger::bottom(_bt); - if (bt == T_INT) { + if (_bt == T_INT) { // Try to strip (n1 & M) or (n1 << N >> N) from n1. if (n1op == Op_AndI && n1->in(2)->is_Con() && @@ -3002,15 +3177,14 @@ Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** tr } // If (maybe after stripping) it is an AddI, we won: - if (n1op == Op_Add(bt)) { - *trunc1 = t1; - *trunc2 = t2; - *trunc_type = trunc_t; - return n1; - } + if (n1op == Op_Add(_bt)) { + _incr = n1; + _outer_trunc = t1; + _inner_trunc = t2; + _trunc_type = trunc_t; - // failed - return nullptr; + _is_valid = true; + } } IfNode* CountedLoopNode::find_multiversion_if_from_multiversion_fast_main_loop() { @@ -3662,18 +3836,18 @@ Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) { // i = ? 
// } while ( i < 10) // -const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { +const TypeInt* CountedLoopConverter::filtered_type(Node* n, Node* n_ctrl) { assert(n && n->bottom_type()->is_int(), "must be int"); const TypeInt* filtered_t = nullptr; if (!n->is_Phi()) { - assert(n_ctrl != nullptr || n_ctrl == C->top(), "valid control"); + assert(n_ctrl != nullptr || n_ctrl == _phase->C->top(), "valid control"); filtered_t = filtered_type_from_dominators(n, n_ctrl); } else { Node* phi = n->as_Phi(); Node* region = phi->in(0); assert(n_ctrl == nullptr || n_ctrl == region, "ctrl parameter must be region"); - if (region && region != C->top()) { + if (region && region != _phase->C->top()) { for (uint i = 1; i < phi->req(); i++) { Node* val = phi->in(i); Node* use_c = region->in(i); @@ -3688,7 +3862,7 @@ const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { } } } - const TypeInt* n_t = _igvn.type(n)->is_int(); + const TypeInt* n_t = _phase->igvn().type(n)->is_int(); if (filtered_t != nullptr) { n_t = n_t->join(filtered_t)->is_int(); } @@ -3698,22 +3872,22 @@ const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { //------------------------------filtered_type_from_dominators-------------------------------- // Return a possibly more restrictive type for val based on condition control flow of dominators -const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) { +const TypeInt* CountedLoopConverter::filtered_type_from_dominators(Node* val, Node* use_ctrl) { if (val->is_Con()) { return val->bottom_type()->is_int(); } uint if_limit = 10; // Max number of dominating if's visited const TypeInt* rtn_t = nullptr; - if (use_ctrl && use_ctrl != C->top()) { - Node* val_ctrl = get_ctrl(val); - uint val_dom_depth = dom_depth(val_ctrl); + if (use_ctrl && use_ctrl != _phase->C->top()) { + Node* val_ctrl = _phase->get_ctrl(val); + uint val_dom_depth = _phase->dom_depth(val_ctrl); Node* pred = use_ctrl; uint 
if_cnt = 0; while (if_cnt < if_limit) { if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) { if_cnt++; - const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred); + const TypeInt* if_t = IfNode::filtered_int_type(&_phase->igvn(), val, pred); if (if_t != nullptr) { if (rtn_t == nullptr) { rtn_t = if_t; @@ -3722,12 +3896,12 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u } } } - pred = idom(pred); - if (pred == nullptr || pred == C->top()) { + pred = _phase->idom(pred); + if (pred == nullptr || pred == _phase->C->top()) { break; } // Stop if going beyond definition block of val - if (dom_depth(pred) < val_dom_depth) { + if (_phase->dom_depth(pred) < val_dom_depth) { break; } } @@ -3761,6 +3935,7 @@ IdealLoopTree::IdealLoopTree(PhaseIdealLoop* phase, Node* head, Node* tail): _pa _has_range_checks(0), _has_range_checks_computed(0), _safepts(nullptr), _required_safept(nullptr), + _reachability_fences(nullptr), _allow_optimizations(true) { precond(_head != nullptr); precond(_tail != nullptr); @@ -4172,8 +4347,13 @@ void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) visited.set(_head->_idx); while (stack.size() > 0) { Node* n = stack.pop(); - if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { - // Terminate this path + if (n->is_Call() && n->as_Call()->guaranteed_safepoint() + && !(n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method())) { + // Terminate this path: guaranteed safepoint found. + // Boxing CallStaticJava calls are excluded as they may lack a safepoint on the fast path. This is + // not done via CallStaticJavaNode::guaranteed_safepoint() as that also controls PcDesc emission. + // In the future, guaranteed_safepoint() should be reworked to correctly handle boxing methods + // to avoid this additional check. 
} else if (n->Opcode() == Op_SafePoint) { if (_phase->get_loop(n) != this) { if (_required_safept == nullptr) _required_safept = new Node_List(); @@ -4271,7 +4451,12 @@ void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { if (!_irreducible) { // Scan the dom-path nodes from tail to head for (Node* n = tail(); n != _head; n = _phase->idom(n)) { - if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { + // Boxing CallStaticJava calls are excluded as they may lack a safepoint on the fast path. This is + // not done via CallStaticJavaNode::guaranteed_safepoint() as that also controls PcDesc emission. + // In the future, guaranteed_safepoint() should be reworked to correctly handle boxing methods + // to avoid this additional check. + if (n->is_Call() && n->as_Call()->guaranteed_safepoint() + && !(n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method())) { has_call = true; _has_sfpt = 1; // Then no need for a safept! break; @@ -4335,7 +4520,7 @@ void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { //---------------------------is_deleteable_safept---------------------------- // Is safept not required by an outer loop? 
-bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) { +bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) const { assert(sfpt->Opcode() == Op_SafePoint, ""); IdealLoopTree* lp = get_loop(sfpt)->_parent; while (lp != nullptr) { @@ -4493,7 +4678,7 @@ void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { _igvn.replace_node( phi2, add ); // Sometimes an induction variable is unused if (add->outcnt() == 0) { - _igvn.remove_dead_node(add); + _igvn.remove_dead_node(add, PhaseIterGVN::NodeOrigin::Graph); } --i; // deleted this phi; rescan starting with next position } @@ -4553,9 +4738,7 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { } IdealLoopTree* loop = this; - if (_head->is_CountedLoop() || - phase->is_counted_loop(_head, loop, T_INT)) { - + if (_head->is_CountedLoop() || phase->try_convert_to_counted_loop(_head, loop, T_INT)) { if (LoopStripMiningIter == 0 || _head->as_CountedLoop()->is_strip_mined()) { // Indicate we do not need a safepoint here _has_sfpt = 1; @@ -4567,11 +4750,14 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { // Look for induction variables phase->replace_parallel_iv(this); - } else if (_head->is_LongCountedLoop() || - phase->is_counted_loop(_head, loop, T_LONG)) { + } else if (_head->is_LongCountedLoop() || phase->try_convert_to_counted_loop(_head, loop, T_LONG)) { remove_safepoints(phase, true); } else { - assert(!_head->is_Loop() || !_head->as_Loop()->is_loop_nest_inner_loop(), "transformation to counted loop should not fail"); + // When StressCountedLoop is enabled, this loop may intentionally avoid a counted loop conversion. + // This is expected behavior for the stress mode, which exercises alternative compilation paths. + if (!StressCountedLoop) { + assert(!_head->is_Loop() || !_head->as_Loop()->is_loop_nest_inner_loop(), "transformation to counted loop should not fail"); + } if (_parent != nullptr && !_irreducible) { // Not a counted loop. Keep one safepoint. 
bool keep_one_sfpt = true; @@ -4671,6 +4857,15 @@ uint IdealLoopTree::est_loop_flow_merge_sz() const { return 0; } +void IdealLoopTree::register_reachability_fence(ReachabilityFenceNode* rf) { + if (_reachability_fences == nullptr) { + _reachability_fences = new Node_List(); + } + if (!_reachability_fences->contains(rf)) { + _reachability_fences->push(rf); + } +} + #ifndef PRODUCT //------------------------------dump_head-------------------------------------- // Dump 1 liner for loop header info @@ -4730,6 +4925,9 @@ void IdealLoopTree::dump_head() { if (_has_call) tty->print(" has_call"); if (_has_sfpt) tty->print(" has_sfpt"); if (_rce_candidate) tty->print(" rce"); + if (_reachability_fences != nullptr && _reachability_fences->size() > 0) { + tty->print(" has_rf"); + } if (_safepts != nullptr && _safepts->size() > 0) { tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }"); } @@ -4737,6 +4935,9 @@ void IdealLoopTree::dump_head() { tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }"); } if (Verbose) { + if (_reachability_fences != nullptr && _reachability_fences->size() > 0) { + tty->print(" rfs={"); _reachability_fences->dump_simple(); tty->print(" }"); + } tty->print(" body={"); _body.dump_simple(); tty->print(" }"); } if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) { @@ -4995,12 +5196,14 @@ bool PhaseIdealLoop::process_expensive_nodes() { // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups. 
void PhaseIdealLoop::build_and_optimize() { - assert(!C->post_loop_opts_phase(), "no loop opts allowed"); - bool do_split_ifs = (_mode == LoopOptsDefault); bool skip_loop_opts = (_mode == LoopOptsNone); bool do_max_unroll = (_mode == LoopOptsMaxUnroll); + bool do_verify = (_mode == LoopOptsVerify); + bool do_expand_reachability_fences = (_mode == PostLoopOptsExpandReachabilityFences); + assert(!C->post_loop_opts_phase() || do_expand_reachability_fences || do_verify, + "no loop opts allowed"); bool old_progress = C->major_progress(); uint orig_worklist_size = _igvn._worklist.size(); @@ -5067,11 +5270,13 @@ void PhaseIdealLoop::build_and_optimize() { BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); // Nothing to do, so get out - bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !do_max_unroll && !_verify_me && - !_verify_only && !bs->is_gc_specific_loop_opts_pass(_mode); + bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !do_max_unroll && + !do_expand_reachability_fences && !_verify_me && !_verify_only && + !bs->is_gc_specific_loop_opts_pass(_mode) ; bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn); + bool do_optimize_reachability_fences = OptimizeReachabilityFences && (C->reachability_fences_count() > 0); bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(_mode); - if (stop_early && !do_expensive_nodes) { + if (stop_early && !do_expensive_nodes && !do_optimize_reachability_fences) { return; } @@ -5147,7 +5352,7 @@ void PhaseIdealLoop::build_and_optimize() { // Given early legal placement, try finding counted loops. This placement // is good enough to discover most loop invariants. 
- if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) { + if (!_verify_me && !_verify_only && !strip_mined_loops_expanded && !do_expand_reachability_fences) { _ltree_root->counted_loop( this ); } @@ -5170,15 +5375,21 @@ void PhaseIdealLoop::build_and_optimize() { // clear out the dead code after build_loop_late while (_deadlist.size()) { - _igvn.remove_globally_dead_node(_deadlist.pop()); + _igvn.remove_globally_dead_node(_deadlist.pop(), PhaseIterGVN::NodeOrigin::Graph); } eliminate_useless_zero_trip_guard(); eliminate_useless_multiversion_if(); if (stop_early) { - assert(do_expensive_nodes, "why are we here?"); - if (process_expensive_nodes()) { + assert(do_expensive_nodes || do_optimize_reachability_fences, "why are we here?"); + // Use the opportunity to optimize reachability fence nodes irrespective of + // whether loop optimizations are performed or not. + if (do_optimize_reachability_fences && optimize_reachability_fences()) { + recompute_dom_depth(); + DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } ); + } + if (do_expensive_nodes && process_expensive_nodes()) { // If we made some progress when processing expensive nodes then // the IGVN may modify the graph in a way that will allow us to // make some more progress: we need to try processing expensive @@ -5206,6 +5417,22 @@ void PhaseIdealLoop::build_and_optimize() { } #endif + if (do_optimize_reachability_fences && optimize_reachability_fences()) { + recompute_dom_depth(); + DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } ); + } + + if (do_expand_reachability_fences) { + assert(C->post_loop_opts_phase(), "required"); + if (expand_reachability_fences()) { + recompute_dom_depth(); + DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } ); + } + return; + } + + assert(!C->post_loop_opts_phase(), "required"); + if (skip_loop_opts) { C->restore_major_progress(old_progress); return; @@ -5373,9 +5600,15 @@ int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes int 
PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique volatile int PhaseIdealLoop::_long_loop_candidates=0; // Number of long loops seen volatile int PhaseIdealLoop::_long_loop_nests=0; // Number of long loops successfully transformed to a nest -volatile int PhaseIdealLoop::_long_loop_counted_loops=0; // Number of long loops successfully transformed to a counted loop +// Number of long loops successfully transformed to a counted loop +volatile int CountedLoopConverter::_long_loop_counted_loops = 0; void PhaseIdealLoop::print_statistics() { - tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d, long loops=%d/%d/%d", _loop_invokes, _loop_work, _long_loop_counted_loops, _long_loop_nests, _long_loop_candidates); + tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d, long loops=%d/%d/%d", + _loop_invokes, + _loop_work, + CountedLoopConverter::_long_loop_counted_loops, + _long_loop_nests, + _long_loop_candidates); } #endif @@ -6096,6 +6329,8 @@ int PhaseIdealLoop::build_loop_tree_impl(Node* n, int pre_order) { // Record all safepoints in this loop. 
if (innermost->_safepts == nullptr) innermost->_safepts = new Node_List(); innermost->_safepts->push(n); + } else if (n->is_ReachabilityFence()) { + innermost->register_reachability_fence(n->as_ReachabilityFence()); } } } diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp index 986cfdaa3f1..26b82259327 100644 --- a/src/hotspot/share/opto/loopnode.hpp +++ b/src/hotspot/share/opto/loopnode.hpp @@ -44,6 +44,7 @@ class PredicateBlock; class PathFrequency; class PhaseIdealLoop; class LoopSelector; +class ReachabilityFenceNode; class UnswitchedLoopSelector; class VectorSet; class VSharedData; @@ -273,11 +274,6 @@ public: CountedLoopEndNode* loopexit() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit(); } int stride_con() const; - // Match increment with optional truncation - static Node* - match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInteger** trunc_type, - BasicType bt); - // A 'main' loop has a pre-loop and a post-loop. The 'main' loop // can run short a few iterations and may start a few iterations in. // It will be RCE'd and unrolled and aligned. @@ -667,6 +663,7 @@ public: Node_List* _safepts; // List of safepoints in this loop Node_List* _required_safept; // A inner loop cannot delete these safepts; + Node_List* _reachability_fences; // List of reachability fences in this loop bool _allow_optimizations; // Allow loop optimizations IdealLoopTree(PhaseIdealLoop* phase, Node* head, Node* tail); @@ -725,6 +722,9 @@ public: // Check for Node being a loop-breaking test Node *is_loop_exit(Node *iff) const; + // Return unique loop-exit projection or null if the loop has multiple exits. + IfFalseNode* unique_loop_exit_proj_or_null(); + // Remove simplistic dead code from loop body void DCE_loop_body(); @@ -830,6 +830,9 @@ public: return _head->as_Loop()->is_strip_mined() ? _parent : this; } + // Registers a reachability fence node in the loop. 
+ void register_reachability_fence(ReachabilityFenceNode* rf); + #ifndef PRODUCT void dump_head(); // Dump loop head only void dump(); // Dump this loop recursively @@ -1029,8 +1032,6 @@ private: void rewire_old_target_loop_entry_dependency_to_new_entry(CountedLoopNode* target_loop_head, const Node* old_target_loop_entry, uint node_index_before_new_assertion_predicate_nodes); - void insert_loop_limit_check_predicate(ParsePredicateSuccessProj* loop_limit_check_parse_proj, Node* cmp_limit, - Node* bol); void log_loop_tree(); public: @@ -1174,6 +1175,16 @@ public: forward_ctrl(old_node, new_node); } + void remove_dead_data_node(Node* dead) { + assert(dead->outcnt() == 0 && !dead->is_top(), "must be dead"); + assert(!dead->is_CFG(), "not a data node"); + Node* c = get_ctrl(dead); + IdealLoopTree* lpt = get_loop(c); + _loop_or_ctrl.map(dead->_idx, nullptr); // This node is useless + lpt->_body.yank(dead); + igvn().remove_dead_node(dead, PhaseIterGVN::NodeOrigin::Graph); + } + private: // Place 'n' in some loop nest, where 'n' is a CFG node @@ -1294,7 +1305,7 @@ public: void recompute_dom_depth(); // Is safept not required by an outer loop? 
- bool is_deleteable_safept(Node* sfpt); + bool is_deleteable_safept(Node* sfpt) const; // Replace parallel induction variable (parallel to trip counter) void replace_parallel_iv(IdealLoopTree *loop); @@ -1345,21 +1356,109 @@ public: // Per-Node transform virtual Node* transform(Node* n) { return nullptr; } - Node* loop_exit_control(Node* x, IdealLoopTree* loop); - Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob); - Node* loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr); - Node* loop_iv_stride(Node* incr, Node*& xphi); - PhiNode* loop_iv_phi(Node* xphi, Node* phi_incr, Node* x); + Node* loop_exit_control(const IdealLoopTree* loop) const; - bool is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt); + class LoopExitTest { + bool _is_valid; + + const Node* _back_control; + const IdealLoopTree* _loop; + PhaseIdealLoop* _phase; + + Node* _cmp; + Node* _incr; + Node* _limit; + BoolTest::mask _mask; + float _cl_prob; + + public: + LoopExitTest(const Node* back_control, const IdealLoopTree* loop, PhaseIdealLoop* phase) : + _is_valid(false), + _back_control(back_control), + _loop(loop), + _phase(phase), + _cmp(nullptr), + _incr(nullptr), + _limit(nullptr), + _mask(BoolTest::illegal), + _cl_prob(0.0f) {} + + void build(); + void canonicalize_mask(jlong stride_con); + + bool is_valid_with_bt(BasicType bt) const { + return _is_valid && _cmp != nullptr && _cmp->Opcode() == Op_Cmp(bt); + } + + bool should_include_limit() const { return _mask == BoolTest::le || _mask == BoolTest::ge; } + + CmpNode* cmp() const { return _cmp->as_Cmp(); } + Node* incr() const { return _incr; } + Node* limit() const { return _limit; } + BoolTest::mask mask() const { return _mask; } + float cl_prob() const { return _cl_prob; } + }; + + class LoopIVIncr { + bool _is_valid; + + const Node* _head; + const IdealLoopTree* _loop; + + Node* _incr; + Node* _phi_incr; + + public: + LoopIVIncr(const 
Node* head, const IdealLoopTree* loop) : + _is_valid(false), + _head(head), + _loop(loop), + _incr(nullptr), + _phi_incr(nullptr) {} + + void build(Node* old_incr); + + bool is_valid() const { return _is_valid; } + bool is_valid_with_bt(const BasicType bt) const { + return _is_valid && _incr->Opcode() == Op_Add(bt); + } + + Node* incr() const { return _incr; } + Node* phi_incr() const { return _phi_incr; } + }; + + class LoopIVStride { + bool _is_valid; + + BasicType _iv_bt; + Node* _stride_node; + Node* _xphi; + + public: + LoopIVStride(BasicType iv_bt) : + _is_valid(false), + _iv_bt(iv_bt), + _stride_node(nullptr), + _xphi(nullptr) {} + + void build(const Node* incr); + + bool is_valid() const { return _is_valid && _stride_node != nullptr; } + Node* stride_node() const { return _stride_node; } + Node* xphi() const { return _xphi; } + + jlong compute_non_zero_stride_con(BoolTest::mask mask, BasicType iv_bt) const; + }; + + static PhiNode* loop_iv_phi(const Node* xphi, const Node* phi_incr, const Node* head); + + bool try_convert_to_counted_loop(Node* head, IdealLoopTree*& loop, BasicType iv_bt); Node* loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head, BasicType bt); bool create_loop_nest(IdealLoopTree* loop, Node_List &old_new); -#ifdef ASSERT - bool convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop); -#endif + void add_parse_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop, SafePointNode* sfpt); - SafePointNode* find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop); + SafePointNode* find_safepoint(Node* back_control, const Node* head, const IdealLoopTree* loop); void add_parse_predicates(IdealLoopTree* outer_ilt, LoopNode* inner_head, SafePointNode* cloned_sfpt); @@ -1496,8 +1595,6 @@ public: Node* clone_nodes_with_same_ctrl(Node* start_node, ProjNode* old_uncommon_proj, Node* new_uncommon_proj); void fix_cloned_data_node_controls(const ProjNode* orig, Node* 
new_uncommon_proj, const OrigToNewHashtable& orig_to_clone); - bool has_dominating_loop_limit_check(Node* init_trip, Node* limit, jlong stride_con, BasicType iv_bt, - Node* loop_entry); public: void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true); @@ -1520,6 +1617,15 @@ public: // Implementation of the loop predication to promote checks outside the loop bool loop_predication_impl(IdealLoopTree *loop); + // Reachability Fence (RF) support. + private: + void insert_rf(Node* ctrl, Node* referent); + void replace_rf(Node* old_node, Node* new_node); + void remove_rf(ReachabilityFenceNode* rf); + public: + bool optimize_reachability_fences(); + bool expand_reachability_fences(); + private: bool loop_predication_impl_helper(IdealLoopTree* loop, IfProjNode* if_success_proj, ParsePredicateSuccessProj* parse_predicate_proj, CountedLoopNode* cl, ConNode* zero, @@ -1755,12 +1861,6 @@ public: Node*& shift, Node*& offset); private: - // Return a type based on condition control flow - const TypeInt* filtered_type( Node *n, Node* n_ctrl); - const TypeInt* filtered_type( Node *n ) { return filtered_type(n, nullptr); } - // Helpers for filtered type - const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl); - // Helper functions Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache ); Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ); @@ -1889,7 +1989,6 @@ public: static int _loop_work; // Sum of PhaseIdealLoop x _unique static volatile int _long_loop_candidates; static volatile int _long_loop_nests; - static volatile int _long_loop_counted_loops; #endif #ifdef ASSERT @@ -1901,7 +2000,7 @@ public: void rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const; - void check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) NOT_DEBUG_RETURN; + void check_counted_loop_shape(IdealLoopTree* 
loop, Node* head, BasicType bt) NOT_DEBUG_RETURN; LoopNode* create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head, IfNode* exit_test); @@ -1979,6 +2078,146 @@ public: ConNode* zerocon(BasicType bt); }; +class CountedLoopConverter { + friend class PhaseIdealLoop; + + // Match increment with optional truncation + class TruncatedIncrement { + bool _is_valid; + + BasicType _bt; + + Node* _incr; + Node* _outer_trunc; + Node* _inner_trunc; + const TypeInteger* _trunc_type; + + public: + TruncatedIncrement(BasicType bt) : + _is_valid(false), + _bt(bt), + _incr(nullptr), + _outer_trunc(nullptr), + _inner_trunc(nullptr), + _trunc_type(nullptr) {} + + void build(Node* expr); + + bool is_valid() const { return _is_valid; } + Node* incr() const { return _incr; } + + // Optional truncation for: CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16 + Node* outer_trunc() const { return _outer_trunc; } // the outermost truncating node (either the & or the final >>) + Node* inner_trunc() const { return _inner_trunc; } // the inner truncating node, if applicable (the << in a <> pair) + const TypeInteger* trunc_type() const { return _trunc_type; } + }; + + class LoopStructure { + bool _is_valid; + + const Node* _head; + const IdealLoopTree* _loop; + PhaseIdealLoop* _phase; + BasicType _iv_bt; + + Node* _back_control; + PhaseIdealLoop::LoopExitTest _exit_test; + PhaseIdealLoop::LoopIVIncr _iv_incr; + TruncatedIncrement _truncated_increment; + PhaseIdealLoop::LoopIVStride _stride; + PhiNode* _phi; + SafePointNode* _safepoint; + + public: + LoopStructure(const Node* head, const IdealLoopTree* loop, PhaseIdealLoop* phase, const BasicType iv_bt) : + _is_valid(false), + _head(head), + _loop(loop), + _phase(phase), + _iv_bt(iv_bt), + _back_control(_phase->loop_exit_control(_loop)), + _exit_test(_back_control, _loop, _phase), + _iv_incr(_head, _loop), + _truncated_increment(_iv_bt), + _stride(PhaseIdealLoop::LoopIVStride(_iv_bt)), + _phi(nullptr), + 
_safepoint(nullptr) {} + + void build(); + + jlong final_limit_correction() const; // compute adjusted loop limit correction + bool is_infinite_loop() const; + + bool is_valid() const { return _is_valid; } + + Node* back_control() const { return _back_control; } + PhaseIdealLoop::LoopExitTest& exit_test() { return _exit_test; } + PhaseIdealLoop::LoopIVIncr& iv_incr() { return _iv_incr; } + TruncatedIncrement& truncated_increment() { return _truncated_increment; } + PhaseIdealLoop::LoopIVStride& stride() { return _stride; } + PhiNode* phi() const { return _phi; } + SafePointNode* sfpt() const { return _safepoint; } + jlong stride_con() const { return _stride.compute_non_zero_stride_con(_exit_test.mask(), _iv_bt); } + Node* limit() const { return _exit_test.limit(); } + }; + + PhaseIdealLoop* const _phase; + Node* const _head; + IdealLoopTree* const _loop; + const BasicType _iv_bt; + + LoopStructure _structure; + bool _should_insert_stride_overflow_limit_check = false; + bool _should_insert_init_trip_limit_check = false; + + DEBUG_ONLY(bool _checked_for_counted_loop = false;) + + // stats for PhaseIdealLoop::print_statistics() + static volatile int _long_loop_counted_loops; + + // Return a type based on condition control flow + const TypeInt* filtered_type(Node* n, Node* n_ctrl); + const TypeInt* filtered_type(Node* n) { return filtered_type(n, nullptr); } + // Helpers for filtered type + const TypeInt* filtered_type_from_dominators(Node* val, Node* val_ctrl); + + void insert_loop_limit_check_predicate(const ParsePredicateSuccessProj* loop_limit_check_parse_proj, Node* bol) const; + void insert_stride_overflow_limit_check() const; + void insert_init_trip_limit_check() const; + bool has_dominating_loop_limit_check(Node* init_trip, Node* limit, jlong stride_con, BasicType iv_bt, + Node* loop_entry) const; + + bool is_iv_overflowing(const TypeInteger* init_t, jlong stride_con, Node* phi_increment, BoolTest::mask mask) const; + bool has_truncation_wrap(const 
TruncatedIncrement& truncation, Node* phi, jlong stride_con); + SafePointNode* find_safepoint(Node* iftrue); + bool is_safepoint_invalid(SafePointNode* sfpt) const; + + public: + CountedLoopConverter(PhaseIdealLoop* phase, Node* head, IdealLoopTree* loop, const BasicType iv_bt) + : _phase(phase), + _head(head), + _loop(loop), + _iv_bt(iv_bt), + _structure(LoopStructure(_head, _loop, _phase, _iv_bt)) { + assert(phase != nullptr, "must be"); // Fail early if mandatory parameters are null. + assert(head != nullptr, "must be"); + assert(loop != nullptr, "must be"); + assert(iv_bt == T_INT || iv_bt == T_LONG, "either int or long loops"); + } + + bool is_counted_loop(); + IdealLoopTree* convert(); + + DEBUG_ONLY(bool should_stress_long_counted_loop();) + DEBUG_ONLY(bool stress_long_counted_loop();) + + enum StrideOverflowState { + Overflow = -1, + NoOverflow = 0, + RequireLimitCheck = 1 + }; + static StrideOverflowState check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt); +}; class AutoNodeBudget : public StackObj { diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp index 862cb7067ec..ccd53129a87 100644 --- a/src/hotspot/share/opto/loopopts.cpp +++ b/src/hotspot/share/opto/loopopts.cpp @@ -161,7 +161,7 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { } if (the_clone != x) { - _igvn.remove_dead_node(the_clone); + _igvn.remove_dead_node(the_clone, PhaseIterGVN::NodeOrigin::Speculative); } else if (region->is_Loop() && i == LoopNode::LoopBackControl && n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) { // it is not a win if 'x' moved from an outer to an inner loop @@ -172,7 +172,7 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { } // Too few wins? 
if (!wins.profitable(policy)) { - _igvn.remove_dead_node(phi); + _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Speculative); return nullptr; } @@ -620,10 +620,10 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) { IdealLoopTree* n23_loop = get_loop(n23_ctrl); if (n22loop != n_loop && n22loop->is_member(n_loop) && n23_loop == n_loop) { - Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3)); + Node* add1 = AddPNode::make_with_base(n->in(1), n->in(2)->in(2), n->in(3)); // Stuff new AddP in the loop preheader register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl)); - Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3)); + Node* add2 = AddPNode::make_with_base(n->in(1), add1, n->in(2)->in(3)); register_new_node(add2, n_ctrl); _igvn.replace_node(n, add2); return add2; @@ -641,10 +641,10 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) { Node *tmp = V; V = I; I = tmp; } if (!ctrl_is_member(n_loop, I)) { - Node* add1 = new AddPNode(n->in(1), n->in(2), I); + Node* add1 = AddPNode::make_with_base(n->in(1), n->in(2), I); // Stuff new AddP in the loop preheader register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl)); - Node* add2 = new AddPNode(n->in(1), add1, V); + Node* add2 = AddPNode::make_with_base(n->in(1), add1, V); register_new_node(add2, n_ctrl); _igvn.replace_node(n, add2); return add2; @@ -1307,7 +1307,14 @@ Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) co bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) { - if (!n->is_If() || n->is_BaseCountedLoopEnd()) { + if (!n->is_If()) { + return false; + } + if (n->outcnt() != n->as_If()->required_outcnt()) { + assert(false, "malformed IfNode with %d outputs", n->outcnt()); + return false; + } + if (n->is_BaseCountedLoopEnd()) { return false; } if (!n->in(0)->is_Region()) { @@ -1433,7 +1440,10 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n) { // 
Check some safety conditions if (iff->is_If()) { // Classic split-if? - if (iff->in(0) != n_ctrl) { + if (iff->outcnt() != iff->as_If()->required_outcnt()) { + assert(false, "malformed IfNode with %d outputs", iff->outcnt()); + return; + } else if (iff->in(0) != n_ctrl) { return; // Compare must be in same blk as if } } else if (iff->is_CMove()) { // Trying to split-up a CMOVE @@ -1478,9 +1488,11 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n) { // Now split the IF C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff); - if (TraceLoopOpts) { - tty->print_cr("Split-If"); +#ifndef PRODUCT + if (TraceLoopOpts || TraceSplitIf) { + tty->print_cr("Split-If: %d %s", iff->_idx, iff->Name()); } +#endif do_split_if(iff); C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff); return; @@ -1590,6 +1602,11 @@ bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) { // Now split the IF RegionNode* new_false_region; RegionNode* new_true_region; +#ifndef PRODUCT + if (TraceLoopOpts || TraceSplitIf) { + tty->print_cr("Split-If Merging Identical Ifs: Dom-If: %d %s, If: %d %s", dom_if->_idx, dom_if->Name(), n->_idx, n->Name()); + } +#endif do_split_if(n, &new_false_region, &new_true_region); assert(new_false_region->req() == new_true_region->req(), ""); #ifdef ASSERT @@ -1859,7 +1876,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { assert(cast != nullptr, "must have added a cast to pin the node"); } } - _igvn.remove_dead_node(n); + _igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph); } _dom_lca_tags_round = 0; } @@ -2075,7 +2092,7 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* phi) { // Register with optimizer Node *hit1 = _igvn.hash_find_insert(phi1); if (hit1) { // Hit, toss just made Phi - _igvn.remove_dead_node(phi1); // Remove new phi + _igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi assert(hit1->is_Phi(), "" ); phi1 = (PhiNode*)hit1; // Use existing phi } else { // Miss @@ -2083,7 +2100,7 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* 
phi) { } Node *hit2 = _igvn.hash_find_insert(phi2); if (hit2) { // Hit, toss just made Phi - _igvn.remove_dead_node(phi2); // Remove new phi + _igvn.remove_dead_node(phi2, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi assert(hit2->is_Phi(), "" ); phi2 = (PhiNode*)hit2; // Use existing phi } else { // Miss @@ -2158,7 +2175,7 @@ CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) { // Register with optimizer Node *hit1 = _igvn.hash_find_insert(phi1); if( hit1 ) { // Hit, toss just made Phi - _igvn.remove_dead_node(phi1); // Remove new phi + _igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi assert( hit1->is_Phi(), "" ); phi1 = (PhiNode*)hit1; // Use existing phi } else { // Miss @@ -2166,7 +2183,7 @@ CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) { } Node *hit2 = _igvn.hash_find_insert(phi2); if( hit2 ) { // Hit, toss just made Phi - _igvn.remove_dead_node(phi2); // Remove new phi + _igvn.remove_dead_node(phi2, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi assert( hit2->is_Phi(), "" ); phi2 = (PhiNode*)hit2; // Use existing phi } else { // Miss @@ -2317,7 +2334,7 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new, _igvn.register_new_node_with_optimizer(phi); // Register new phi } else { // or // Remove the new phi from the graph and use the hit - _igvn.remove_dead_node(phi); + _igvn.remove_dead_node(phi, phi == prev ? PhaseIterGVN::NodeOrigin::Graph : PhaseIterGVN::NodeOrigin::Speculative); phi = hit; // Use existing phi } set_ctrl(phi, prev); @@ -2829,8 +2846,6 @@ void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, // with an optional truncation (left-shift followed by a right-shift) // of the add. Returns zero if not an iv. 
int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { - Node* trunc1 = nullptr; - Node* trunc2 = nullptr; const TypeInteger* ttype = nullptr; if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) { return 0; @@ -2851,23 +2866,23 @@ int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { Node* phi = cmp1; for (uint i = 1; i < phi->req(); i++) { Node* in = phi->in(i); - Node* add = CountedLoopNode::match_incr_with_optional_truncation(in, - &trunc1, &trunc2, &ttype, T_INT); - if (add && add->in(1) == phi) { - add2 = add->in(2); + CountedLoopConverter::TruncatedIncrement add(T_INT); + add.build(in); + if (add.is_valid() && add.incr()->in(1) == phi) { + add2 = add.incr()->in(2); break; } } } else { // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) ))) Node* addtrunc = cmp1; - Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc, - &trunc1, &trunc2, &ttype, T_INT); - if (add && add->in(1)->is_Phi()) { - Node* phi = add->in(1); + CountedLoopConverter::TruncatedIncrement add(T_INT); + add.build(addtrunc); + if (add.is_valid() && add.incr()->in(1)->is_Phi()) { + Node* phi = add.incr()->in(1); for (uint i = 1; i < phi->req(); i++) { if (phi->in(i) == addtrunc) { - add2 = add->in(2); + add2 = add.incr()->in(2); break; } } @@ -3467,7 +3482,7 @@ void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_va set_ctrl(phi, lp); } else { // Remove the new phi from the graph and use the hit - _igvn.remove_dead_node(phi); + _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Speculative); phi = hit; } _igvn.replace_input_of(use, idx, phi); @@ -4294,54 +4309,50 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old #endif //ASSERT { // Is the shape of the loop that of a counted loop... 
- Node* back_control = loop_exit_control(head, loop); + Node* back_control = loop_exit_control(loop); if (back_control == nullptr) { return false; } - BoolTest::mask bt = BoolTest::illegal; - float cl_prob = 0; - Node* incr = nullptr; - Node* limit = nullptr; - Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob); - if (cmp == nullptr || cmp->Opcode() != Op_CmpI) { + LoopExitTest loop_exit(back_control, loop, this); + loop_exit.build(); + if (!loop_exit.is_valid_with_bt(T_INT)) { return false; } + const Node* loop_incr = loop_exit.incr(); + // With an extra phi for the candidate iv? // Or the region node is the loop head - if (!incr->is_Phi() || incr->in(0) == head) { + if (!loop_incr->is_Phi() || loop_incr->in(0) == head) { return false; } PathFrequency pf(head, this); - region = incr->in(0); + region = loop_incr->in(0); // Go over all paths for the extra phi's region and see if that // path is frequent enough and would match the expected iv shape // if the extra phi is removed inner = 0; - for (uint i = 1; i < incr->req(); ++i) { - Node* in = incr->in(i); - Node* trunc1 = nullptr; - Node* trunc2 = nullptr; - const TypeInteger* iv_trunc_t = nullptr; - Node* orig_in = in; - if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) { + for (uint i = 1; i < loop_incr->req(); ++i) { + CountedLoopConverter::TruncatedIncrement increment(T_INT); + increment.build(loop_incr->in(i)); + if (!increment.is_valid()) { continue; } - assert(in->Opcode() == Op_AddI, "wrong increment code"); - Node* xphi = nullptr; - Node* stride = loop_iv_stride(in, xphi); + assert(increment.incr()->Opcode() == Op_AddI, "wrong increment code"); - if (stride == nullptr) { + LoopIVStride stride = LoopIVStride(T_INT); + stride.build(increment.incr()); + if (!stride.is_valid()) { continue; } - PhiNode* phi = loop_iv_phi(xphi, nullptr, head); + PhiNode* phi = loop_iv_phi(stride.xphi(), nullptr, head); if (phi == nullptr || - (trunc1 
== nullptr && phi->in(LoopNode::LoopBackControl) != incr) || - (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) { + (increment.outer_trunc() == nullptr && phi->in(LoopNode::LoopBackControl) != loop_exit.incr()) || + (increment.outer_trunc() != nullptr && phi->in(LoopNode::LoopBackControl) != increment.outer_trunc())) { return false; } diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index ef38a511a88..8c5dbe7fb48 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -44,6 +44,7 @@ #include "opto/node.hpp" #include "opto/opaquenode.hpp" #include "opto/phaseX.hpp" +#include "opto/reachability.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" @@ -52,6 +53,7 @@ #include "prims/jvmtiExport.hpp" #include "runtime/continuation.hpp" #include "runtime/sharedRuntime.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/powerOfTwo.hpp" #if INCLUDE_G1GC @@ -261,74 +263,128 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me } } -// Generate loads from source of the arraycopy for fields of -// destination needed at a deoptimization point -Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc) { +// Determine if there is an interfering store between a rematerialization load and an arraycopy that is in the process +// of being elided. Starting from the given rematerialization load this method starts a BFS traversal upwards through +// the memory graph towards the provided ArrayCopyNode. For every node encountered on the traversal, check that it is +// independent from the provided rematerialization. Returns false if every node on the traversal is independent and +// true otherwise. 
+bool has_interfering_store(const ArrayCopyNode* ac, LoadNode* load, PhaseGVN* phase) { + assert(ac != nullptr && load != nullptr, "sanity"); + AccessAnalyzer acc(phase, load); + ResourceMark rm; + Unique_Node_List to_visit; + to_visit.push(load->in(MemNode::Memory)); + + for (uint worklist_idx = 0; worklist_idx < to_visit.size(); worklist_idx++) { + Node* mem = to_visit.at(worklist_idx); + + if (mem->is_Proj() && mem->in(0) == ac) { + // Reached the target, so visit what is left on the worklist. + continue; + } + + if (mem->is_Phi()) { + assert(mem->bottom_type() == Type::MEMORY, "do not leave memory graph"); + // Add all non-control inputs of phis to be visited. + for (uint phi_in = 1; phi_in < mem->len(); phi_in++) { + Node* input = mem->in(phi_in); + if (input != nullptr) { + to_visit.push(input); + } + } + continue; + } + + AccessAnalyzer::AccessIndependence ind = acc.detect_access_independence(mem); + if (ind.independent) { + to_visit.push(ind.mem); + } else { + return true; + } + } + // Did not find modification of source element in memory graph. + return false; +} + +// Generate loads from source of the arraycopy for fields of destination needed at a deoptimization point. +// Returns nullptr if the load cannot be created because the arraycopy is not suitable for elimination +// (e.g. copy inside the array with non-constant offsets) or the inputs do not match our assumptions (e.g. +// the arraycopy does not actually write something at the provided offset). 
+Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type* ftype, AllocateNode* alloc) { + assert((ctl == ac->control() && mem == ac->memory()) != (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()), + "Either the control and memory are the same as for the arraycopy or they are pinned in an uncommon trap."); BasicType bt = ft; const Type *type = ftype; if (ft == T_NARROWOOP) { bt = T_OBJECT; type = ftype->make_oopptr(); } - Node* res = nullptr; + Node* base = ac->in(ArrayCopyNode::Src); + Node* adr = nullptr; + const TypePtr* adr_type = nullptr; + if (ac->is_clonebasic()) { assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination"); - Node* base = ac->in(ArrayCopyNode::Src); - Node* adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(offset))); - const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset); - MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem(); - BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt); + adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(offset))); + adr_type = _igvn.type(base)->is_ptr()->add_offset(offset); } else { - if (ac->modifies(offset, offset, &_igvn, true)) { - assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result"); - uint shift = exact_log2(type2aelembytes(bt)); - Node* src_pos = ac->in(ArrayCopyNode::SrcPos); - Node* dest_pos = ac->in(ArrayCopyNode::DestPos); - const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int(); - const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int(); + if (!ac->modifies(offset, offset, &_igvn, true)) { + // If the arraycopy does not copy to this offset, we cannot generate a rematerialization load for it. 
+ return nullptr; + } + assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result"); + uint shift = exact_log2(type2aelembytes(bt)); + Node* src_pos = ac->in(ArrayCopyNode::SrcPos); + Node* dest_pos = ac->in(ArrayCopyNode::DestPos); + const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int(); + const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int(); - Node* adr = nullptr; - const TypePtr* adr_type = nullptr; - if (src_pos_t->is_con() && dest_pos_t->is_con()) { - intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset; - Node* base = ac->in(ArrayCopyNode::Src); - adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(off))); - adr_type = _igvn.type(base)->is_ptr()->add_offset(off); - if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) { - // Don't emit a new load from src if src == dst but try to get the value from memory instead - return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc); - } - } else { - Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos))); -#ifdef _LP64 - diff = _igvn.transform(new ConvI2LNode(diff)); -#endif - diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift))); - - Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff)); - Node* base = ac->in(ArrayCopyNode::Src); - adr = _igvn.transform(new AddPNode(base, base, off)); - adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot); - if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) { - // Non constant offset in the array: we can't statically - // determine the value - return nullptr; - } + if (src_pos_t->is_con() && dest_pos_t->is_con()) { + intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset; + adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(off))); + adr_type = _igvn.type(base)->is_ptr()->add_offset(off); 
+ if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) { + // Don't emit a new load from src if src == dst but try to get the value from memory instead + return value_from_mem(ac, ctl, ft, ftype, adr_type->isa_oopptr(), alloc); + } + } else { + Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos))); +#ifdef _LP64 + diff = _igvn.transform(new ConvI2LNode(diff)); +#endif + diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift))); + + Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff)); + adr = _igvn.transform(AddPNode::make_with_base(base, off)); + adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot); + if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) { + // Non constant offset in the array: we can't statically + // determine the value + return nullptr; } - MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem(); - BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt); } } - if (res != nullptr) { - if (ftype->isa_narrowoop()) { - // PhaseMacroExpand::scalar_replacement adds DecodeN nodes - res = _igvn.transform(new EncodePNode(res, ftype)); - } - return res; + assert(adr != nullptr && adr_type != nullptr, "sanity"); + + // Create the rematerialization load ... + MergeMemNode* mergemem = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem(); + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + Node* res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemem, adr, adr_type, type, bt); + assert(res != nullptr, "load should have been created"); + + // ... and ensure that pinning the rematerialization load inside the uncommon path is safe. 
+ if (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj() && res->is_Load() && + has_interfering_store(ac, res->as_Load(), &_igvn)) { + // Not safe: use control and memory from the arraycopy to ensure correct memory state. + _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph); // Clean up the unusable rematerialization load. + return make_arraycopy_load(ac, offset, ac->control(), ac->memory(), ft, ftype, alloc); } - return nullptr; + + if (ftype->isa_narrowoop()) { + // PhaseMacroExpand::scalar_replacement adds DecodeN nodes + res = _igvn.transform(new EncodePNode(res, ftype)); + } + return res; } // @@ -441,21 +497,22 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type * } // Search the last value stored into the object's field. -Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) { +Node* PhaseMacroExpand::value_from_mem(Node* origin, Node* ctl, BasicType ft, const Type* ftype, const TypeOopPtr* adr_t, AllocateNode* alloc) { assert(adr_t->is_known_instance_field(), "instance required"); int instance_id = adr_t->instance_id(); assert((uint)instance_id == alloc->_idx, "wrong allocation"); int alias_idx = C->get_alias_index(adr_t); int offset = adr_t->offset(); + Node* orig_mem = origin->in(TypeFunc::Memory); Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); Node *alloc_ctrl = alloc->in(TypeFunc::Control); Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false); assert(alloc_mem != nullptr, "Allocation without a memory projection."); VectorSet visited; - bool done = sfpt_mem == alloc_mem; - Node *mem = sfpt_mem; + bool done = orig_mem == alloc_mem; + Node *mem = orig_mem; while (!done) { if (visited.test_set(mem->_idx)) { return nullptr; // found a loop, give up @@ -535,17 +592,22 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType } 
} } else if (mem->is_ArrayCopy()) { - Node* ctl = mem->in(0); - Node* m = mem->in(TypeFunc::Memory); - if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj()) { + // Rematerialize the scalar-replaced array. If possible, pin the loads to the uncommon path of the uncommon trap. + // Check for each element of the source array, whether it was modified. If not, pin both memory and control to + // the uncommon path. Otherwise, use the control and memory state of the arraycopy. Control and memory state must + // come from the same source to prevent anti-dependence problems in the backend. + ArrayCopyNode* ac = mem->as_ArrayCopy(); + Node* ac_ctl = ac->control(); + Node* ac_mem = ac->memory(); + if (ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()) { // pin the loads in the uncommon trap path - ctl = sfpt_ctl; - m = sfpt_mem; + ac_ctl = ctl; + ac_mem = orig_mem; } - return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc); + return make_arraycopy_load(ac, offset, ac_ctl, ac_mem, ft, ftype, alloc); } } - // Something go wrong. + // Something went wrong. 
return nullptr; } @@ -622,6 +684,8 @@ bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode use->as_ArrayCopy()->is_copyofrange_validated()) && use->in(ArrayCopyNode::Dest) == res) { // ok to eliminate + } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) { + // ok to eliminate } else if (use->is_SafePoint()) { SafePointNode* sfpt = use->as_SafePoint(); if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) { @@ -717,7 +781,13 @@ void PhaseMacroExpand::undo_previous_scalarizations(GrowableArray 0) { SafePointNode* sfpt_done = safepoints_done.pop(); + + SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(igvn()); + + sfpt_done->remove_non_debug_edges(non_debug_edges_worklist); + // remove any extra entries we added to the safepoint + assert(sfpt_done->jvms()->endoff() == sfpt_done->req(), "no extra edges past debug info allowed"); uint last = sfpt_done->req() - 1; for (int k = 0; k < nfields; k++) { sfpt_done->del_req(last--); @@ -738,6 +808,9 @@ void PhaseMacroExpand::undo_previous_scalarizations(GrowableArray restore_non_debug_edges(non_debug_edges_worklist); + _igvn._worklist.push(sfpt_done); } } @@ -778,6 +851,8 @@ void PhaseMacroExpand::undo_previous_scalarizations(GrowableArray jvms()->endoff() == sfpt->req(), "no extra edges past debug info allowed"); + // Fields of scalar objs are referenced only at the end // of regular debuginfo at the last (youngest) JVMS. // Record relative start index. @@ -852,7 +927,7 @@ SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_descriptio const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr(); - Node *field_val = value_from_mem(sfpt->memory(), sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc); + Node* field_val = value_from_mem(sfpt, sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc); // We weren't able to find a value for this field, // give up on eliminating this allocation. 
@@ -903,17 +978,25 @@ SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_descriptio } // Do scalar replacement. -bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray & safepoints) { - GrowableArray safepoints_done; +bool PhaseMacroExpand::scalar_replacement(AllocateNode* alloc, GrowableArray& safepoints) { + GrowableArray safepoints_done; Node* res = alloc->result_cast(); assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result"); // Process the safepoint uses while (safepoints.length() > 0) { SafePointNode* sfpt = safepoints.pop(); + + SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(igvn()); + + // All sfpt inputs are implicitly included into debug info during the scalarization process below. + // Keep non-debug inputs separately, so they stay non-debug. + sfpt->remove_non_debug_edges(non_debug_edges_worklist); + SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt); if (sobj == nullptr) { + sfpt->restore_non_debug_edges(non_debug_edges_worklist); undo_previous_scalarizations(safepoints_done, alloc); return false; } @@ -922,6 +1005,8 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray jvms(); sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn); + non_debug_edges_worklist.remove_edge_if_present(res); // drop scalarized input from non-debug info + sfpt->restore_non_debug_edges(non_debug_edges_worklist); _igvn._worklist.push(sfpt); // keep it for rollback @@ -973,7 +1058,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { } k -= (oc2 - use->outcnt()); } - _igvn.remove_dead_node(use); + _igvn.remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph); } else if (use->is_ArrayCopy()) { // Disconnect ArrayCopy node ArrayCopyNode* ac = use->as_ArrayCopy(); @@ -1008,17 +1093,19 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { // src can be top at this point if 
src and dest of the // arraycopy were the same if (src->outcnt() == 0 && !src->is_top()) { - _igvn.remove_dead_node(src); + _igvn.remove_dead_node(src, PhaseIterGVN::NodeOrigin::Graph); } } _igvn._worklist.push(ac); + } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) { + use->as_ReachabilityFence()->clear_referent(_igvn); // redundant fence; will be removed during IGVN } else { eliminate_gc_barrier(use); } j -= (oc1 - res->outcnt()); } assert(res->outcnt() == 0, "all uses of allocated objects must be deleted"); - _igvn.remove_dead_node(res); + _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph); } // @@ -1199,7 +1286,7 @@ bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) { Node* PhaseMacroExpand::make_load_raw(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) { - Node* adr = basic_plus_adr(top(), base, offset); + Node* adr = off_heap_plus_addr(base, offset); const TypePtr* adr_type = adr->bottom_type()->is_ptr(); Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered); transform_later(value); @@ -1208,7 +1295,7 @@ Node* PhaseMacroExpand::make_load_raw(Node* ctl, Node* mem, Node* base, int offs Node* PhaseMacroExpand::make_store_raw(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) { - Node* adr = basic_plus_adr(top(), base, offset); + Node* adr = off_heap_plus_addr(base, offset); mem = StoreNode::make(_igvn, ctl, mem, adr, nullptr, value, bt, MemNode::unordered); transform_later(mem); return mem; @@ -1313,7 +1400,9 @@ void PhaseMacroExpand::expand_allocate_common( initial_slow_test = nullptr; } - bool allocation_has_use = (alloc->result_cast() != nullptr); + // ArrayCopyNode right after an allocation operates on the raw result projection for the Allocate node so it's not + // safe to remove such an allocation even if it has no result cast. 
+ bool allocation_has_use = (alloc->result_cast() != nullptr) || (alloc->initialization() != nullptr && alloc->initialization()->is_complete_with_arraycopy()); if (!allocation_has_use) { InitializeNode* init = alloc->initialization(); if (init != nullptr) { @@ -1500,7 +1589,7 @@ void PhaseMacroExpand::expand_allocate_common( transform_later(_callprojs.fallthrough_memproj); } migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj); - _igvn.remove_dead_node(_callprojs.catchall_memproj); + _igvn.remove_dead_node(_callprojs.catchall_memproj, PhaseIterGVN::NodeOrigin::Graph); } // An allocate node has separate i_o projections for the uses on the control @@ -1519,7 +1608,7 @@ void PhaseMacroExpand::expand_allocate_common( transform_later(_callprojs.fallthrough_ioproj); } migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj); - _igvn.remove_dead_node(_callprojs.catchall_ioproj); + _igvn.remove_dead_node(_callprojs.catchall_ioproj, PhaseIterGVN::NodeOrigin::Graph); } // if we generated only a slow call, we are done @@ -1583,11 +1672,11 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) { --i; // back up iterator } assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted"); - _igvn.remove_dead_node(_callprojs.resproj); + _igvn.remove_dead_node(_callprojs.resproj, PhaseIterGVN::NodeOrigin::Graph); } if (_callprojs.fallthrough_catchproj != nullptr) { migrate_outs(_callprojs.fallthrough_catchproj, ctrl); - _igvn.remove_dead_node(_callprojs.fallthrough_catchproj); + _igvn.remove_dead_node(_callprojs.fallthrough_catchproj, PhaseIterGVN::NodeOrigin::Graph); } if (_callprojs.catchall_catchproj != nullptr) { _igvn.rehash_node_delayed(_callprojs.catchall_catchproj); @@ -1595,16 +1684,16 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) { } if (_callprojs.fallthrough_proj != nullptr) { Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out(); - _igvn.remove_dead_node(catchnode); - 
_igvn.remove_dead_node(_callprojs.fallthrough_proj); + _igvn.remove_dead_node(catchnode, PhaseIterGVN::NodeOrigin::Graph); + _igvn.remove_dead_node(_callprojs.fallthrough_proj, PhaseIterGVN::NodeOrigin::Graph); } if (_callprojs.fallthrough_memproj != nullptr) { migrate_outs(_callprojs.fallthrough_memproj, mem); - _igvn.remove_dead_node(_callprojs.fallthrough_memproj); + _igvn.remove_dead_node(_callprojs.fallthrough_memproj, PhaseIterGVN::NodeOrigin::Graph); } if (_callprojs.fallthrough_ioproj != nullptr) { migrate_outs(_callprojs.fallthrough_ioproj, i_o); - _igvn.remove_dead_node(_callprojs.fallthrough_ioproj); + _igvn.remove_dead_node(_callprojs.fallthrough_ioproj, PhaseIterGVN::NodeOrigin::Graph); } if (_callprojs.catchall_memproj != nullptr) { _igvn.rehash_node_delayed(_callprojs.catchall_memproj); @@ -1623,7 +1712,7 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) { } } #endif - _igvn.remove_dead_node(alloc); + _igvn.remove_dead_node(alloc, PhaseIterGVN::NodeOrigin::Graph); } void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init, @@ -1818,81 +1907,81 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, Node* old_eden_top, Node* new_eden_top, intx lines) { enum { fall_in_path = 1, pf_path = 2 }; - if( UseTLAB && AllocatePrefetchStyle == 2 ) { + if (UseTLAB && AllocatePrefetchStyle == 2) { // Generate prefetch allocation with watermark check. // As an allocation hits the watermark, we will prefetch starting // at a "distance" away from watermark. 
- Node *pf_region = new RegionNode(3); - Node *pf_phi_rawmem = new PhiNode( pf_region, Type::MEMORY, - TypeRawPtr::BOTTOM ); + Node* pf_region = new RegionNode(3); + Node* pf_phi_rawmem = new PhiNode(pf_region, Type::MEMORY, + TypeRawPtr::BOTTOM); // I/O is used for Prefetch - Node *pf_phi_abio = new PhiNode( pf_region, Type::ABIO ); + Node* pf_phi_abio = new PhiNode(pf_region, Type::ABIO); - Node *thread = new ThreadLocalNode(); + Node* thread = new ThreadLocalNode(); transform_later(thread); - Node *eden_pf_adr = new AddPNode( top()/*not oop*/, thread, - _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) ); + Node* eden_pf_adr = AddPNode::make_off_heap(thread, + _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset()))); transform_later(eden_pf_adr); - Node *old_pf_wm = new LoadPNode(needgc_false, + Node* old_pf_wm = new LoadPNode(needgc_false, contended_phi_rawmem, eden_pf_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); transform_later(old_pf_wm); // check against new_eden_top - Node *need_pf_cmp = new CmpPNode( new_eden_top, old_pf_wm ); + Node* need_pf_cmp = new CmpPNode(new_eden_top, old_pf_wm); transform_later(need_pf_cmp); - Node *need_pf_bol = new BoolNode( need_pf_cmp, BoolTest::ge ); + Node* need_pf_bol = new BoolNode(need_pf_cmp, BoolTest::ge); transform_later(need_pf_bol); - IfNode *need_pf_iff = new IfNode( needgc_false, need_pf_bol, - PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN ); + IfNode* need_pf_iff = new IfNode(needgc_false, need_pf_bol, + PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN); transform_later(need_pf_iff); // true node, add prefetchdistance - Node *need_pf_true = new IfTrueNode( need_pf_iff ); + Node* need_pf_true = new IfTrueNode(need_pf_iff); transform_later(need_pf_true); - Node *need_pf_false = new IfFalseNode( need_pf_iff ); + Node* need_pf_false = new IfFalseNode(need_pf_iff); transform_later(need_pf_false); - Node *new_pf_wmt = new AddPNode( top(), old_pf_wm, - _igvn.MakeConX(AllocatePrefetchDistance) ); - 
transform_later(new_pf_wmt ); + Node* new_pf_wmt = AddPNode::make_off_heap(old_pf_wm, + _igvn.MakeConX(AllocatePrefetchDistance)); + transform_later(new_pf_wmt); new_pf_wmt->set_req(0, need_pf_true); - Node *store_new_wmt = new StorePNode(need_pf_true, + Node* store_new_wmt = new StorePNode(need_pf_true, contended_phi_rawmem, eden_pf_adr, TypeRawPtr::BOTTOM, new_pf_wmt, MemNode::unordered); transform_later(store_new_wmt); // adding prefetches - pf_phi_abio->init_req( fall_in_path, i_o ); + pf_phi_abio->init_req(fall_in_path, i_o); - Node *prefetch_adr; - Node *prefetch; + Node* prefetch_adr; + Node* prefetch; uint step_size = AllocatePrefetchStepSize; uint distance = 0; - for ( intx i = 0; i < lines; i++ ) { - prefetch_adr = new AddPNode( old_pf_wm, new_pf_wmt, - _igvn.MakeConX(distance) ); + for (intx i = 0; i < lines; i++) { + prefetch_adr = AddPNode::make_off_heap(new_pf_wmt, + _igvn.MakeConX(distance)); transform_later(prefetch_adr); - prefetch = new PrefetchAllocationNode( i_o, prefetch_adr ); + prefetch = new PrefetchAllocationNode(i_o, prefetch_adr); transform_later(prefetch); distance += step_size; i_o = prefetch; } - pf_phi_abio->set_req( pf_path, i_o ); + pf_phi_abio->set_req(pf_path, i_o); - pf_region->init_req( fall_in_path, need_pf_false ); - pf_region->init_req( pf_path, need_pf_true ); + pf_region->init_req(fall_in_path, need_pf_false); + pf_region->init_req(pf_path, need_pf_true); - pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem ); - pf_phi_rawmem->init_req( pf_path, store_new_wmt ); + pf_phi_rawmem->init_req(fall_in_path, contended_phi_rawmem); + pf_phi_rawmem->init_req(pf_path, store_new_wmt); transform_later(pf_region); transform_later(pf_phi_rawmem); @@ -1901,7 +1990,7 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, needgc_false = pf_region; contended_phi_rawmem = pf_phi_rawmem; i_o = pf_phi_abio; - } else if( UseTLAB && AllocatePrefetchStyle == 3 ) { + } else if (UseTLAB && AllocatePrefetchStyle == 3) 
{ // Insert a prefetch instruction for each allocation. // This code is used to generate 1 prefetch instruction per cache line. @@ -1910,13 +1999,12 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, uint distance = AllocatePrefetchDistance; // Next cache address. - Node *cache_adr = new AddPNode(old_eden_top, old_eden_top, - _igvn.MakeConX(step_size + distance)); + Node* cache_adr = AddPNode::make_off_heap(old_eden_top, + _igvn.MakeConX(step_size + distance)); transform_later(cache_adr); cache_adr = new CastP2XNode(needgc_false, cache_adr); transform_later(cache_adr); - // Address is aligned to execute prefetch to the beginning of cache line size - // (it is important when BIS instruction is used on SPARC as prefetch). + // Address is aligned to execute prefetch to the beginning of cache line size. Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1)); cache_adr = new AndXNode(cache_adr, mask); transform_later(cache_adr); @@ -1924,36 +2012,36 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, transform_later(cache_adr); // Prefetch - Node *prefetch = new PrefetchAllocationNode( contended_phi_rawmem, cache_adr ); + Node* prefetch = new PrefetchAllocationNode(contended_phi_rawmem, cache_adr); prefetch->set_req(0, needgc_false); transform_later(prefetch); contended_phi_rawmem = prefetch; - Node *prefetch_adr; + Node* prefetch_adr; distance = step_size; - for ( intx i = 1; i < lines; i++ ) { - prefetch_adr = new AddPNode( cache_adr, cache_adr, - _igvn.MakeConX(distance) ); + for (intx i = 1; i < lines; i++) { + prefetch_adr = AddPNode::make_off_heap(cache_adr, + _igvn.MakeConX(distance)); transform_later(prefetch_adr); - prefetch = new PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr ); + prefetch = new PrefetchAllocationNode(contended_phi_rawmem, prefetch_adr); transform_later(prefetch); distance += step_size; contended_phi_rawmem = prefetch; } - } else if( AllocatePrefetchStyle > 0 ) { + } else if 
(AllocatePrefetchStyle > 0) { // Insert a prefetch for each allocation only on the fast-path - Node *prefetch_adr; - Node *prefetch; + Node* prefetch_adr; + Node* prefetch; // Generate several prefetch instructions. uint step_size = AllocatePrefetchStepSize; uint distance = AllocatePrefetchDistance; - for ( intx i = 0; i < lines; i++ ) { - prefetch_adr = new AddPNode( top(), new_eden_top, - _igvn.MakeConX(distance) ); + for (intx i = 0; i < lines; i++) { + prefetch_adr = AddPNode::make_off_heap(new_eden_top, + _igvn.MakeConX(distance)); transform_later(prefetch_adr); - prefetch = new PrefetchAllocationNode( i_o, prefetch_adr ); + prefetch = new PrefetchAllocationNode(i_o, prefetch_adr); // Do not let it float too high, since if eden_top == eden_end, // both might be null. - if( i == 0 ) { // Set control for first prefetch, next follows it + if (i == 0) { // Set control for first prefetch, next follows it prefetch->init_req(0, needgc_false); } transform_later(prefetch); @@ -2499,6 +2587,7 @@ void PhaseMacroExpand::eliminate_macro_nodes() { assert(n->Opcode() == Op_LoopLimit || n->Opcode() == Op_ModD || n->Opcode() == Op_ModF || + n->Opcode() == Op_PowD || n->is_OpaqueConstantBool() || n->is_OpaqueInitializedAssertionPredicate() || n->Opcode() == Op_MaxL || @@ -2655,18 +2744,11 @@ bool PhaseMacroExpand::expand_macro_nodes() { default: switch (n->Opcode()) { case Op_ModD: - case Op_ModF: { - CallNode* mod_macro = n->as_Call(); - CallNode* call = new CallLeafPureNode(mod_macro->tf(), mod_macro->entry_point(), mod_macro->_name); - call->init_req(TypeFunc::Control, mod_macro->in(TypeFunc::Control)); - call->init_req(TypeFunc::I_O, C->top()); - call->init_req(TypeFunc::Memory, C->top()); - call->init_req(TypeFunc::ReturnAdr, C->top()); - call->init_req(TypeFunc::FramePtr, C->top()); - for (unsigned int i = 0; i < mod_macro->tf()->domain()->cnt() - TypeFunc::Parms; i++) { - call->init_req(TypeFunc::Parms + i, mod_macro->in(TypeFunc::Parms + i)); - } - 
_igvn.replace_node(mod_macro, call); + case Op_ModF: + case Op_PowD: { + CallLeafPureNode* call_macro = n->as_CallLeafPure(); + CallLeafPureNode* call = call_macro->inline_call_leaf_pure_node(); + _igvn.replace_node(call_macro, call); transform_later(call); break; } diff --git a/src/hotspot/share/opto/macro.hpp b/src/hotspot/share/opto/macro.hpp index 0f8d8fb5172..e4b20402e63 100644 --- a/src/hotspot/share/opto/macro.hpp +++ b/src/hotspot/share/opto/macro.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,19 +40,32 @@ private: public: // Helper methods roughly modeled after GraphKit: - Node* basic_plus_adr(Node* base, int offset) { - return (offset == 0)? base: basic_plus_adr(base, MakeConX(offset)); + Node* basic_plus_adr(Node* ptr, int offset, bool raw_base = false) { + return basic_plus_adr(ptr, MakeConX(offset), raw_base); } + Node* basic_plus_adr(Node* base, Node* ptr, int offset) { - return (offset == 0)? ptr: basic_plus_adr(base, ptr, MakeConX(offset)); + return basic_plus_adr(base, ptr, MakeConX(offset)); } - Node* basic_plus_adr(Node* base, Node* offset) { - return basic_plus_adr(base, base, offset); + + Node* basic_plus_adr(Node* ptr, Node* offset, bool raw_base = false) { + Node* base = raw_base ? top() : ptr; + return basic_plus_adr(base, ptr, offset); } + Node* basic_plus_adr(Node* base, Node* ptr, Node* offset) { - Node* adr = new AddPNode(base, ptr, offset); - return transform_later(adr); + return (offset == MakeConX(0)) ? 
+ ptr : transform_later(AddPNode::make_with_base(base, ptr, offset)); } + + Node* off_heap_plus_addr(Node* ptr, int offset) { + return basic_plus_adr(top(), ptr, MakeConX(offset)); + } + + Node* off_heap_plus_addr(Node* ptr, Node* offset) { + return basic_plus_adr(top(), ptr, offset); + } + Node* transform_later(Node* n) { // equivalent to _gvn.transform in GraphKit, Ideal, etc. _igvn.register_new_node_with_optimizer(n); @@ -91,7 +104,7 @@ private: address slow_call_address, Node* valid_length_test); void yank_alloc_node(AllocateNode* alloc); - Node *value_from_mem(Node *mem, Node *ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc); + Node* value_from_mem(Node* start, Node* ctl, BasicType ft, const Type* ftype, const TypeOopPtr* adr_t, AllocateNode* alloc); Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level); bool eliminate_boxing_node(CallStaticJavaNode *boxing); @@ -109,7 +122,7 @@ private: // More helper methods modeled after GraphKit for array copy void insert_mem_bar(Node** ctrl, Node** mem, int opcode, int alias_idx, Node* precedent = nullptr); - Node* array_element_address(Node* ary, Node* idx, BasicType elembt); + Node* array_element_address(Node* ary, Node* idx, BasicType elembt, bool raw_base); Node* ConvI2L(Node* offset); // helper methods modeled after LibraryCallKit for array copy @@ -192,7 +205,7 @@ private: Node* klass_node, Node* length, Node* size_in_bytes); - Node* make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc); + Node* make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type* ftype, AllocateNode* alloc); public: PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) { diff --git a/src/hotspot/share/opto/macroArrayCopy.cpp b/src/hotspot/share/opto/macroArrayCopy.cpp index 
0719ffc45a5..139775506db 100644 --- a/src/hotspot/share/opto/macroArrayCopy.cpp +++ b/src/hotspot/share/opto/macroArrayCopy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,10 +55,10 @@ void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, int a } } -Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt) { +Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt, bool raw_base) { uint shift = exact_log2(type2aelembytes(elembt)); uint header = arrayOopDesc::base_offset_in_bytes(elembt); - Node* base = basic_plus_adr(ary, header); + Node* base = basic_plus_adr(ary, header, raw_base); #ifdef _LP64 // see comment in GraphKit::array_element_address int index_max = max_jint - 1; // array size is max_jint, index is one less @@ -67,7 +67,7 @@ Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType el #endif Node* scale = new LShiftXNode(idx, intcon(shift)); transform_later(scale); - return basic_plus_adr(ary, base, scale); + return basic_plus_adr(raw_base ? 
top() : ary, base, scale); } Node* PhaseMacroExpand::ConvI2L(Node* offset) { @@ -379,6 +379,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* bool disjoint_bases, bool length_never_negative, RegionNode* slow_region) { + Node* orig_dest = dest; if (slow_region == nullptr) { slow_region = new RegionNode(1); transform_later(slow_region); @@ -411,6 +412,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* assert(dest->is_CheckCastPP(), "sanity"); assert(dest->in(0)->in(0) == init, "dest pinned"); adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory + dest = dest->in(1); // writing to raw memory requires a raw base // From this point on, every exit path is responsible for // initializing any non-copied parts of the object to zero. // Also, if this flag is set we make sure that arraycopy interacts properly @@ -638,7 +640,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // (At this point we can assume disjoint_bases, since types differ.) 
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); - Node* p1 = basic_plus_adr(top(), dest_klass, ek_offset); + Node* p1 = off_heap_plus_addr(dest_klass, ek_offset); Node* n1 = LoadKlassNode::make(_igvn, C->immutable_memory(), p1, TypeRawPtr::BOTTOM); Node* dest_elem_klass = transform_later(n1); Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem, @@ -771,7 +773,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* local_mem = generate_slow_arraycopy(ac, &local_ctrl, local_mem, &local_io, adr_type, - src, src_offset, dest, dest_offset, + src, src_offset, orig_dest, dest_offset, copy_length, /*dest_uninitialized*/false); result_region->init_req(slow_call_path, local_ctrl); @@ -839,7 +841,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* _igvn.replace_node(_callprojs.fallthrough_catchproj, *ctrl); #ifdef ASSERT - const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr(); + const TypeOopPtr* dest_t = _igvn.type(orig_dest)->is_oopptr(); if (dest_t->is_known_instance()) { ArrayCopyNode* ac = nullptr; assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost"); @@ -915,12 +917,12 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, if (start_con >= 0 && end_con >= 0) { // Constant start and end. Simple. mem = ClearArrayNode::clear_memory(ctrl, mem, dest, - start_con, end_con, false, &_igvn); + start_con, end_con, adr_type == TypeRawPtr::BOTTOM, &_igvn); } else if (start_con >= 0 && dest_size != top()) { // Constant start, pre-rounded end after the tail of the array. Node* end = dest_size; mem = ClearArrayNode::clear_memory(ctrl, mem, dest, - start_con, end, false, &_igvn); + start_con, end, adr_type == TypeRawPtr::BOTTOM, &_igvn); } else if (start_con >= 0 && slice_len != top()) { // Constant start, non-constant end. End needs rounding up. 
// End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8) @@ -933,7 +935,7 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, end = transform_later(new AddXNode(end, MakeConX(end_base)) ); end = transform_later(new AndXNode(end, MakeConX(~end_round)) ); mem = ClearArrayNode::clear_memory(ctrl, mem, dest, - start_con, end, false, &_igvn); + start_con, end, adr_type == TypeRawPtr::BOTTOM, &_igvn); } else if (start_con < 0 && dest_size != top()) { // Non-constant start, pre-rounded end after the tail of the array. // This is almost certainly a "round-to-end" operation. @@ -960,14 +962,14 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, if (bump_bit != 0) { // Store a zero to the immediately preceding jint: Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) ); - Node* p1 = basic_plus_adr(dest, x1); + Node* p1 = basic_plus_adr(dest, x1, adr_type == TypeRawPtr::BOTTOM); mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered); mem = transform_later(mem); } } Node* end = dest_size; // pre-rounded mem = ClearArrayNode::clear_memory(ctrl, mem, dest, - start, end, false, &_igvn); + start, end, adr_type == TypeRawPtr::BOTTOM, &_igvn); } else { // Non-constant start, unrounded non-constant end. // (Nobody zeroes a random midsection of an array using this routine.) 
@@ -1009,7 +1011,7 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt && ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) { Node* sptr = basic_plus_adr(src, src_off); - Node* dptr = basic_plus_adr(dest, dest_off); + Node* dptr = basic_plus_adr(dest, dest_off, adr_type == TypeRawPtr::BOTTOM); const TypePtr* s_adr_type = _igvn.type(sptr)->is_ptr(); assert(s_adr_type->isa_aryptr(), "impossible slice"); uint s_alias_idx = C->get_alias_index(s_adr_type); @@ -1037,7 +1039,7 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, // Do this copy by giant steps. Node* sptr = basic_plus_adr(src, src_off); - Node* dptr = basic_plus_adr(dest, dest_off); + Node* dptr = basic_plus_adr(dest, dest_off, adr_type == TypeRawPtr::BOTTOM); Node* countx = dest_size; countx = transform_later(new SubXNode(countx, MakeConX(dest_off))); countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong))); @@ -1129,13 +1131,13 @@ Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** // look in each non-null element's class, at the desired klass's // super_check_offset, for the desired klass. 
int sco_offset = in_bytes(Klass::super_check_offset_offset()); - Node* p3 = basic_plus_adr(top(), dest_elem_klass, sco_offset); + Node* p3 = off_heap_plus_addr(dest_elem_klass, sco_offset); Node* n3 = new LoadINode(nullptr, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered); Node* check_offset = ConvI2X(transform_later(n3)); Node* check_value = dest_elem_klass; - Node* src_start = array_element_address(src, src_offset, T_OBJECT); - Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT); + Node* src_start = array_element_address(src, src_offset, T_OBJECT, false); + Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT, adr_type == TypeRawPtr::BOTTOM); const TypeFunc* call_type = OptoRuntime::checkcast_arraycopy_Type(); Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "checkcast_arraycopy", adr_type, @@ -1190,8 +1192,8 @@ void PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** Node* src_start = src; Node* dest_start = dest; if (src_offset != nullptr || dest_offset != nullptr) { - src_start = array_element_address(src, src_offset, basic_elem_type); - dest_start = array_element_address(dest, dest_offset, basic_elem_type); + src_start = array_element_address(src, src_offset, basic_elem_type, false); + dest_start = array_element_address(dest, dest_offset, basic_elem_type, adr_type == TypeRawPtr::BOTTOM); } // Figure out which arraycopy runtime method to call. diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp index 9579af84f24..4de41d6f2ef 100644 --- a/src/hotspot/share/opto/matcher.hpp +++ b/src/hotspot/share/opto/matcher.hpp @@ -221,12 +221,12 @@ public: // Convert a machine register to a machine register type, so-as to // properly match spill code. const int *_register_save_type; + #ifdef ASSERT // Maps from machine register to boolean; true if machine register can // be holding a call argument in some signature. 
static bool can_be_java_arg( int reg ); - // Maps from machine register to boolean; true if machine register holds - // a spillable argument. - static bool is_spillable_arg( int reg ); + #endif + // Number of integer live ranges that constitute high register pressure static uint int_pressure_limit(); // Number of float live ranges that constitute high register pressure @@ -429,10 +429,6 @@ public: // Register for MODL projection of divmodL static const RegMask& modL_proj_mask(); - // Use hardware DIV instruction when it is faster than - // a code which use multiply for division by constant. - static bool use_asm_for_ldiv_by_con( jlong divisor ); - // Java-Interpreter calling convention // (what you use when calling between compiled-Java and Interpreted-Java @@ -443,9 +439,6 @@ public: // The Method-klass-holder may be passed in the inline_cache_reg // and then expanded into the inline_cache_reg and a method_ptr register - // Interpreter's Frame Pointer Register - static OptoReg::Name interpreter_frame_pointer_reg(); - // Java-Native calling convention // (what you use when intercalling between Java and C++ code) diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 21ed15f9ec7..a49d4708d32 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -582,7 +582,6 @@ bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1, return false; } - // Find an arraycopy ac that produces the memory state represented by parameter mem. // Return ac if // (a) can_see_stored_value=true and ac must have set the value for this load or if @@ -697,178 +696,32 @@ ArrayCopyNode* MemNode::find_array_copy_clone(Node* ld_alloc, Node* mem) const { // (Currently, only LoadNode::Ideal has steps (c), (d). More later.) 
// Node* MemNode::find_previous_store(PhaseValues* phase) { - Node* ctrl = in(MemNode::Control); - Node* adr = in(MemNode::Address); - intptr_t offset = 0; - Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); - AllocateNode* alloc = AllocateNode::Ideal_allocation(base); + AccessAnalyzer analyzer(phase, this); - const TypePtr* adr_type = this->adr_type(); - if (adr_type == nullptr) { - // This means the access is dead - return phase->C->top(); - } else if (adr_type->base() == TypePtr::AnyPtr) { - assert(adr_type->ptr() == TypePtr::Null, "MemNode should never access a wide memory"); - // Give up, this will upset Compile::get_alias_index - return nullptr; - } - - int alias_idx = phase->C->get_alias_index(adr_type); - assert(alias_idx != Compile::AliasIdxTop, "must not be a dead node"); - assert(alias_idx != Compile::AliasIdxBot || !phase->C->do_aliasing(), "must not be a very wide access"); - - if (offset == Type::OffsetBot) - return nullptr; // cannot unalias unless there are precise offsets - - const bool adr_maybe_raw = check_if_adr_maybe_raw(adr); - const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr(); - - intptr_t size_in_bytes = memory_size(); - - Node* mem = in(MemNode::Memory); // start searching here... - - int cnt = 50; // Cycle limiter - for (;;) { // While we can dance past unrelated stores... - if (--cnt < 0) break; // Caught in cycle or a complicated dance? 
- - Node* prev = mem; - if (mem->is_Store()) { - Node* st_adr = mem->in(MemNode::Address); - intptr_t st_offset = 0; - Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset); - if (st_base == nullptr) { - // inscrutable pointer - break; - } - - // If the bases are the same and the offsets are the same, it seems that this is the exact - // store we are looking for, the caller will check if the type of the store matches using - // MemNode::can_see_stored_value - if (st_base == base && st_offset == offset) { - return mem; // (b) found the store that this access observes - } - - // If it is provable that the memory accessed by mem does not overlap the memory accessed by - // this, we may walk past mem. - // For raw accesses, 2 accesses are independent if they have the same base and the offsets - // say that they do not overlap. - // For heap accesses, 2 accesses are independent if either the bases are provably different - // at runtime or the offsets say that the accesses do not overlap. - if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) { - // Raw accesses can only be provably independent if they have the same base - break; - } - - // If the offsets say that the accesses do not overlap, then it is provable that mem and this - // do not overlap. For example, a LoadI from Object+8 is independent from a StoreL into - // Object+12, no matter what the bases are. - if (st_offset != offset && st_offset != Type::OffsetBot) { - const int MAX_STORE = MAX2(BytesPerLong, (int)MaxVectorSize); - assert(mem->as_Store()->memory_size() <= MAX_STORE, ""); - if (st_offset >= offset + size_in_bytes || - st_offset <= offset - MAX_STORE || - st_offset <= offset - mem->as_Store()->memory_size()) { - // Success: The offsets are provably independent. - // (You may ask, why not just test st_offset != offset and be done? - // The answer is that stores of different sizes can co-exist - // in the same sequence of RawMem effects. 
We sometimes initialize - // a whole 'tile' of array elements with a single jint or jlong.) - mem = mem->in(MemNode::Memory); - continue; // (a) advance through the independent store - } - } - - // Same base and overlapping offsets, it seems provable that the accesses overlap, give up - if (st_base == base) { - break; - } - - // Try to prove that 2 different base nodes at compile time are different values at runtime - bool known_independent = false; - if (detect_ptr_independence(base, alloc, st_base, AllocateNode::Ideal_allocation(st_base), phase)) { - known_independent = true; - } - - if (known_independent) { - mem = mem->in(MemNode::Memory); - continue; // (a) advance through the independent store - } - } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) { - InitializeNode* st_init = mem->in(0)->as_Initialize(); - AllocateNode* st_alloc = st_init->allocation(); - if (st_alloc == nullptr) { - break; // something degenerated - } - bool known_identical = false; - bool known_independent = false; - if (alloc == st_alloc) { - known_identical = true; - } else if (alloc != nullptr) { - known_independent = true; - } else if (all_controls_dominate(this, st_alloc)) { - known_independent = true; - } - - if (known_independent) { - // The bases are provably independent: Either they are - // manifestly distinct allocations, or else the control - // of this load dominates the store's allocation. - if (alias_idx == Compile::AliasIdxRaw) { - mem = st_alloc->in(TypeFunc::Memory); - } else { - mem = st_init->memory(alias_idx); - } - continue; // (a) advance through independent store memory - } - - // (b) at this point, if we are not looking at a store initializing - // the same allocation we are loading from, we lose. - if (known_identical) { - // From caller, can_see_stored_value will consult find_captured_store. 
- return mem; // let caller handle steps (c), (d) - } - - } else if (find_previous_arraycopy(phase, alloc, mem, false) != nullptr) { - if (prev != mem) { - // Found an arraycopy but it doesn't affect that load - continue; - } - // Found an arraycopy that may affect that load - return mem; - } else if (mem->is_MergeMem()) { - mem = mem->as_MergeMem()->memory_at(alias_idx); - continue; - } else if (addr_t != nullptr && addr_t->is_known_instance_field()) { - // Can't use optimize_simple_memory_chain() since it needs PhaseGVN. - if (mem->is_Proj() && mem->in(0)->is_Call()) { - // ArrayCopyNodes processed here as well. - CallNode *call = mem->in(0)->as_Call(); - if (!call->may_modify(addr_t, phase)) { - mem = call->in(TypeFunc::Memory); - continue; // (a) advance through independent call memory - } - } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) { - ArrayCopyNode* ac = nullptr; - if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase, ac)) { - break; - } - mem = mem->in(0)->in(TypeFunc::Memory); - continue; // (a) advance through independent MemBar memory - } else if (mem->is_ClearArray()) { - if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) { - // (the call updated 'mem' value) - continue; // (a) advance through independent allocation memory - } else { - // Can not bypass initialization of the instance - // we are looking for. - return mem; - } - } + Node* mem = in(MemNode::Memory); // start searching here... + int cnt = 50; // Cycle limiter + for (;; cnt--) { + // While we can dance past unrelated stores... + if (phase->type(mem) == Type::TOP) { + // Encounter a dead node + return phase->C->top(); + } else if (cnt <= 0) { + // Caught in cycle or a complicated dance? + return nullptr; + } else if (mem->is_Phi()) { + return nullptr; } - // Unless there is an explicit 'continue', we must bail out here, - // because 'mem' is an inscrutable memory state (e.g., a call). 
- break; + AccessAnalyzer::AccessIndependence independence = analyzer.detect_access_independence(mem); + if (independence.independent) { + // (a) advance through the independent store + mem = independence.mem; + assert(mem != nullptr, "must not be nullptr"); + } else { + // (b) found the store that this access observes if this is not null + // Otherwise, give up if it is null + return independence.mem; + } } return nullptr; // bail out @@ -918,6 +771,174 @@ uint8_t MemNode::barrier_data(const Node* n) { return 0; } +AccessAnalyzer::AccessAnalyzer(PhaseValues* phase, MemNode* n) + : _phase(phase), _n(n), _memory_size(n->memory_size()), _alias_idx(-1) { + Node* adr = _n->in(MemNode::Address); + _offset = 0; + _base = AddPNode::Ideal_base_and_offset(adr, _phase, _offset); + _maybe_raw = MemNode::check_if_adr_maybe_raw(adr); + _alloc = AllocateNode::Ideal_allocation(_base); + _adr_type = _n->adr_type(); + + if (_adr_type != nullptr && _adr_type->base() != TypePtr::AnyPtr) { + // Avoid the cases that will upset Compile::get_alias_index + _alias_idx = _phase->C->get_alias_index(_adr_type); + assert(_alias_idx != Compile::AliasIdxTop, "must not be a dead node"); + assert(_alias_idx != Compile::AliasIdxBot || !phase->C->do_aliasing(), "must not be a very wide access"); + } +} + +// Decide whether the memory accessed by '_n' and 'other' may overlap. This function may be used +// when we want to walk the memory graph to fold a load, or when we want to hoist a load above a +// loop when there are no stores that may overlap with the load inside the loop. 
+AccessAnalyzer::AccessIndependence AccessAnalyzer::detect_access_independence(Node* other) const { + assert(_phase->type(other) == Type::MEMORY, "must be a memory node %s", other->Name()); + assert(!other->is_Phi(), "caller must handle Phi"); + + if (_adr_type == nullptr) { + // This means the access is dead + return {false, _phase->C->top()}; + } else if (_adr_type->base() == TypePtr::AnyPtr) { + // An example for this case is an access into the memory address 0 performed using Unsafe + assert(_adr_type->ptr() == TypePtr::Null, "MemNode should never access a wide memory"); + return {false, nullptr}; + } + + if (_offset == Type::OffsetBot) { + // cannot unalias unless there are precise offsets + return {false, nullptr}; + } + + const TypeOopPtr* adr_oop_type = _adr_type->isa_oopptr(); + Node* prev = other; + if (other->is_Store()) { + Node* st_adr = other->in(MemNode::Address); + intptr_t st_offset = 0; + Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, _phase, st_offset); + if (st_base == nullptr) { + // inscrutable pointer + return {false, nullptr}; + } + + // If the bases are the same and the offsets are the same, it seems that this is the exact + // store we are looking for, the caller will check if the type of the store matches using + // MemNode::can_see_stored_value + if (st_base == _base && st_offset == _offset) { + return {false, other}; + } + + // If it is provable that the memory accessed by 'other' does not overlap the memory accessed + // by '_n', we may walk past 'other'. + // For raw accesses, 2 accesses are independent if they have the same base and the offsets + // say that they do not overlap. + // For heap accesses, 2 accesses are independent if either the bases are provably different + // at runtime or the offsets say that the accesses do not overlap. 
+ if ((_maybe_raw || MemNode::check_if_adr_maybe_raw(st_adr)) && st_base != _base) { + // Raw accesses can only be provably independent if they have the same base + return {false, nullptr}; + } + + // If the offsets say that the accesses do not overlap, then it is provable that 'other' and + // '_n' do not overlap. For example, a LoadI from Object+8 is independent from a StoreL into + // Object+12, no matter what the bases are. + if (st_offset != _offset && st_offset != Type::OffsetBot) { + const int MAX_STORE = MAX2(BytesPerLong, (int)MaxVectorSize); + assert(other->as_Store()->memory_size() <= MAX_STORE, ""); + if (st_offset >= _offset + _memory_size || + st_offset <= _offset - MAX_STORE || + st_offset <= _offset - other->as_Store()->memory_size()) { + // Success: The offsets are provably independent. + // (You may ask, why not just test st_offset != offset and be done? + // The answer is that stores of different sizes can co-exist + // in the same sequence of RawMem effects. We sometimes initialize + // a whole 'tile' of array elements with a single jint or jlong.) 
+ return {true, other->in(MemNode::Memory)}; + } + } + + // Same base and overlapping offsets, it seems provable that the accesses overlap, give up + if (st_base == _base) { + return {false, nullptr}; + } + + // Try to prove that 2 different base nodes at compile time are different values at runtime + bool known_independent = false; + if (MemNode::detect_ptr_independence(_base, _alloc, st_base, AllocateNode::Ideal_allocation(st_base), _phase)) { + known_independent = true; + } + + if (known_independent) { + return {true, other->in(MemNode::Memory)}; + } + } else if (other->is_Proj() && other->in(0)->is_Initialize()) { + InitializeNode* st_init = other->in(0)->as_Initialize(); + AllocateNode* st_alloc = st_init->allocation(); + if (st_alloc == nullptr) { + // Something degenerated + return {false, nullptr}; + } + bool known_identical = false; + bool known_independent = false; + if (_alloc == st_alloc) { + known_identical = true; + } else if (_alloc != nullptr) { + known_independent = true; + } else if (MemNode::all_controls_dominate(_n, st_alloc)) { + known_independent = true; + } + + if (known_independent) { + // The bases are provably independent: Either they are + // manifestly distinct allocations, or else the control + // of _n dominates the store's allocation. + if (_alias_idx == Compile::AliasIdxRaw) { + other = st_alloc->in(TypeFunc::Memory); + } else { + other = st_init->memory(_alias_idx); + } + return {true, other}; + } + + // If we are not looking at a store initializing the same + // allocation we are loading from, we lose. + if (known_identical) { + // From caller, can_see_stored_value will consult find_captured_store. 
+ return {false, other}; + } + + } else if (_n->find_previous_arraycopy(_phase, _alloc, other, false) != nullptr) { + // Find an arraycopy that may or may not affect the MemNode + return {prev != other, other}; + } else if (other->is_MergeMem()) { + return {true, other->as_MergeMem()->memory_at(_alias_idx)}; + } else if (adr_oop_type != nullptr && adr_oop_type->is_known_instance_field()) { + // Can't use optimize_simple_memory_chain() since it needs PhaseGVN. + if (other->is_Proj() && other->in(0)->is_Call()) { + // ArrayCopyNodes processed here as well. + CallNode* call = other->in(0)->as_Call(); + if (!call->may_modify(adr_oop_type, _phase)) { + return {true, call->in(TypeFunc::Memory)}; + } + } else if (other->is_Proj() && other->in(0)->is_MemBar()) { + ArrayCopyNode* ac = nullptr; + if (!ArrayCopyNode::may_modify(adr_oop_type, other->in(0)->as_MemBar(), _phase, ac)) { + return {true, other->in(0)->in(TypeFunc::Memory)}; + } + } else if (other->is_ClearArray()) { + if (ClearArrayNode::step_through(&other, (uint)adr_oop_type->instance_id(), _phase)) { + // (the call updated 'other' value) + return {true, other}; + } else { + // Can not bypass initialization of the instance + // we are looking for. + return {false, other}; + } + } + } + + return {false, nullptr}; +} + //============================================================================= // Should LoadNode::Ideal() attempt to remove control edges? 
bool LoadNode::can_remove_control() const { @@ -989,6 +1010,7 @@ bool LoadNode::is_immutable_value(Node* adr) { Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, BasicType bt, MemOrd mo, ControlDependency control_dependency, bool require_atomic_access, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) { Compile* C = gvn.C; + assert(adr->is_top() || C->get_alias_index(gvn.type(adr)->is_ptr()) == C->get_alias_index(adr_type), "adr and adr_type must agree"); // sanity check the alias category against the created node type assert(!(adr_type->isa_oopptr() && @@ -1134,40 +1156,38 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const { return nullptr; } - -//---------------------------can_see_stored_value------------------------------ // This routine exists to make sure this set of tests is done the same // everywhere. We need to make a coordinated change: first LoadNode::Ideal // will change the graph shape in a way which makes memory alive twice at the // same time (uses the Oracle model of aliasing), then some // LoadXNode::Identity will fold things back to the equivalence-class model // of aliasing. -Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const { +Node* LoadNode::can_see_stored_value_through_membars(Node* st, PhaseValues* phase) const { Node* ld_adr = in(MemNode::Address); - intptr_t ld_off = 0; - Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off); - Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base); const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr(); Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr; - // This is more general than load from boxing objects. 
+ if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) { uint alias_idx = atp->index(); Node* result = nullptr; Node* current = st; - // Skip through chains of MemBarNodes checking the MergeMems for - // new states for the slice of this load. Stop once any other - // kind of node is encountered. Loads from final memory can skip - // through any kind of MemBar but normal loads shouldn't skip - // through MemBarAcquire since the could allow them to move out of - // a synchronized region. It is not safe to step over MemBarCPUOrder, - // because alias info above them may be inaccurate (e.g., due to - // mixed/mismatched unsafe accesses). + // Skip through chains of MemBarNodes checking the MergeMems for new states for the slice of + // this load. Stop once any other kind of node is encountered. + // + // In principle, folding a load is moving it up until it meets a matching store. + // + // store(ptr, v); store(ptr, v); store(ptr, v); + // membar1; -> membar1; -> load(ptr); + // membar2; load(ptr); membar1; + // load(ptr); membar2; membar2; + // + // So, we can decide which kinds of barriers we can walk past. It is not safe to step over + // MemBarCPUOrder, even if the memory is not rewritable, because alias info above them may be + // inaccurate (e.g., due to mixed/mismatched unsafe accesses). 
bool is_final_mem = !atp->is_rewritable(); while (current->is_Proj()) { int opc = current->in(0)->Opcode(); - if ((is_final_mem && (opc == Op_MemBarAcquire || - opc == Op_MemBarAcquireLock || - opc == Op_LoadFence)) || + if ((is_final_mem && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock || opc == Op_LoadFence)) || opc == Op_MemBarRelease || opc == Op_StoreFence || opc == Op_MemBarReleaseLock || @@ -1194,6 +1214,17 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const { } } + return can_see_stored_value(st, phase); +} + +// If st is a store to the same location as this, return the stored value +Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const { + Node* ld_adr = in(MemNode::Address); + intptr_t ld_off = 0; + Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off); + Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base); + const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr(); + // Loop around twice in the case Load -> Initialize -> Store. // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.) for (int trip = 0; trip <= 1; trip++) { @@ -1322,7 +1353,7 @@ Node* LoadNode::Identity(PhaseGVN* phase) { // If the previous store-maker is the right kind of Store, and the store is // to the same address, then we are equal to the value stored. Node* mem = in(Memory); - Node* value = can_see_stored_value(mem, phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if( value ) { // byte, short & char stores truncate naturally. 
// A load has to load the truncated value which requires @@ -1390,8 +1421,12 @@ Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) { assert(false, "no unsigned variant: %s", Name()); return nullptr; } + const Type* mem_t = gvn.type(in(MemNode::Address)); + if (mem_t == Type::TOP) { + return gvn.C->top(); + } return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), - raw_adr_type(), rt, bt, _mo, _control_dependency, + mem_t->is_ptr(), rt, bt, _mo, _control_dependency, false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access()); } @@ -1410,8 +1445,12 @@ Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) { assert(false, "no signed variant: %s", Name()); return nullptr; } + const Type* mem_t = gvn.type(in(MemNode::Address)); + if (mem_t == Type::TOP) { + return gvn.C->top(); + } return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), - raw_adr_type(), rt, bt, _mo, _control_dependency, + mem_t->is_ptr(), rt, bt, _mo, _control_dependency, false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access()); } @@ -1438,8 +1477,12 @@ Node* LoadNode::convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt) { const int op = Opcode(); bool require_atomic_access = (op == Op_LoadL && ((LoadLNode*)this)->require_atomic_access()) || (op == Op_LoadD && ((LoadDNode*)this)->require_atomic_access()); + const Type* mem_t = gvn.type(in(MemNode::Address)); + if (mem_t == Type::TOP) { + return gvn.C->top(); + } return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), - raw_adr_type(), rt, bt, _mo, _control_dependency, + mem_t->is_ptr(), rt, bt, _mo, _control_dependency, require_atomic_access, is_unaligned_access(), is_mismatched); } @@ -1461,8 +1504,12 @@ Node* StoreNode::convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Ty const int op = Opcode(); bool require_atomic_access = (op == Op_StoreL && 
((StoreLNode*)this)->require_atomic_access()) || (op == Op_StoreD && ((StoreDNode*)this)->require_atomic_access()); + const Type* mem_t = gvn.type(in(MemNode::Address)); + if (mem_t == Type::TOP) { + return gvn.C->top(); + } StoreNode* st = StoreNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), - raw_adr_type(), val, bt, _mo, require_atomic_access); + mem_t->is_ptr(), val, bt, _mo, require_atomic_access); bool is_mismatched = is_mismatched_access(); const TypeRawPtr* raw_type = gvn.type(in(MemNode::Memory))->isa_rawptr(); @@ -1624,11 +1671,15 @@ bool LoadNode::can_split_through_phi_base(PhaseGVN* phase) { intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); + if (base == nullptr) { + return false; + } + if (base->is_CastPP()) { base = base->in(1); } - if (req() > 3 || base == nullptr || !base->is_Phi()) { + if (req() > 3 || !base->is_Phi()) { return false; } @@ -1808,7 +1859,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase, bool ignore_missing_instance_ } if (base_is_phi && (base->in(0) == region)) { Node* base_x = base->in(i); // Clone address for loads from boxed objects. - Node* adr_x = phase->transform(new AddPNode(base_x,base_x,address->in(AddPNode::Offset))); + Node* adr_x = phase->transform(AddPNode::make_with_base(base_x, address->in(AddPNode::Offset))); x->set_req(Address, adr_x); } } @@ -1851,7 +1902,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase, bool ignore_missing_instance_ } } if (x != the_clone && the_clone != nullptr) { - igvn->remove_dead_node(the_clone); + igvn->remove_dead_node(the_clone, PhaseIterGVN::NodeOrigin::Speculative); } phi->set_req(i, x); } @@ -2004,7 +2055,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { // (c) See if we can fold up on the spot, but don't fold up here. // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or // just return a prior value, which is done by Identity calls. 
- if (can_see_stored_value(prev_mem, phase)) { + if (can_see_stored_value_through_membars(prev_mem, phase)) { // Make ready for step (d): set_req_X(MemNode::Memory, prev_mem, phase); return this; @@ -2061,7 +2112,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { Compile* C = phase->C; // If load can see a previous constant store, use that. - Node* value = can_see_stored_value(mem, phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr && value->is_Con()) { assert(value->bottom_type()->higher_equal(_type), "sanity"); return value->bottom_type(); @@ -2312,7 +2363,7 @@ uint LoadNode::match_edge(uint idx) const { // Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr) { Node* narrow = Compile::narrow_value(T_BYTE, value, _type, phase, false); if (narrow != value) { @@ -2325,7 +2376,7 @@ Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadBNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, @@ -2346,7 +2397,7 @@ const Type* LoadBNode::Value(PhaseGVN* phase) const { // Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem, phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr) { Node* narrow = Compile::narrow_value(T_BOOLEAN, value, _type, phase, false); if (narrow != value) { @@ -2359,7 +2410,7 @@ Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadUBNode::Value(PhaseGVN* phase) const { Node* 
mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, @@ -2380,7 +2431,7 @@ const Type* LoadUBNode::Value(PhaseGVN* phase) const { // Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr) { Node* narrow = Compile::narrow_value(T_CHAR, value, _type, phase, false); if (narrow != value) { @@ -2393,7 +2444,7 @@ Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadUSNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, @@ -2414,7 +2465,7 @@ const Type* LoadUSNode::Value(PhaseGVN* phase) const { // Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr) { Node* narrow = Compile::narrow_value(T_SHORT, value, _type, phase, false); if (narrow != value) { @@ -2427,7 +2478,7 @@ Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadSNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); - Node* value = can_see_stored_value(mem,phase); + Node* value = can_see_stored_value_through_membars(mem, phase); if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's 
result type, @@ -2448,7 +2499,6 @@ Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at assert(adr_type != nullptr, "expecting TypeKlassPtr"); #ifdef _LP64 if (adr_type->is_ptr_to_narrowklass()) { - assert(UseCompressedClassPointers, "no compressed klasses"); Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered)); return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr()); } @@ -2757,6 +2807,7 @@ Node* LoadRangeNode::Identity(PhaseGVN* phase) { StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo, bool require_atomic_access) { assert((mo == unordered || mo == release), "unexpected"); Compile* C = gvn.C; + assert(adr_type == nullptr || adr->is_top() || C->get_alias_index(gvn.type(adr)->is_ptr()) == C->get_alias_index(adr_type), "adr and adr_type must agree"); assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw || ctl != nullptr, "raw memory operations should have control edge"); @@ -2777,8 +2828,7 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop())); return new StoreNNode(ctl, mem, adr, adr_type, val, mo); } else if (adr->bottom_type()->is_ptr_to_narrowklass() || - (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() && - adr->bottom_type()->isa_rawptr())) { + (val->bottom_type()->isa_klassptr() && adr->bottom_type()->isa_rawptr())) { val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass())); return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo); } @@ -4035,7 +4085,7 @@ uint LoadStoreNode::ideal_reg() const { bool LoadStoreNode::result_not_used() const { for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node *x = fast_out(i); - if (x->Opcode() == Op_SCMemProj) { + if (x->Opcode() == Op_SCMemProj || 
x->is_ReachabilityFence()) { continue; } if (x->bottom_type() == TypeTuple::MEMBAR && @@ -4152,10 +4202,10 @@ Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node *off = phase->MakeConX(BytesPerLong); mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); count--; - while( count-- ) { + while (count--) { mem = phase->transform(mem); - adr = phase->transform(new AddPNode(base,adr,off)); - mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); + adr = phase->transform(AddPNode::make_with_base(base, adr, off)); + mem = new StoreLNode(in(0), mem, adr, atp, zero, MemNode::unordered, false); } return mem; } @@ -4192,7 +4242,7 @@ Node* ClearArrayNode::make_address(Node* dest, Node* offset, bool raw_base, Phas // May be called as part of the initialization of a just allocated object base = phase->C->top(); } - return phase->transform(new AddPNode(base, dest, offset)); + return phase->transform(AddPNode::make_with_base(base, dest, offset)); } //----------------------------clear_memory------------------------------------- @@ -4310,7 +4360,9 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { case Op_StoreStoreFence: return new StoreStoreFenceNode(C, atp, pn); case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn); case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn); + case Op_MemBarStoreLoad: return new MemBarStoreLoadNode(C, atp, pn); case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn); + case Op_MemBarFull: return new MemBarFullNode(C, atp, pn); case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn); case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn); case Op_Initialize: return new InitializeNode(C, atp, pn); @@ -5001,8 +5053,7 @@ Node* InitializeNode::make_raw_address(intptr_t offset, Node* addr = in(RawAddress); if (offset != 0) { Compile* C = phase->C; - addr = phase->transform( new AddPNode(C->top(), addr, - 
phase->MakeConX(offset)) ); + addr = phase->transform(AddPNode::make_off_heap(addr, phase->MakeConX(offset))); } return addr; } @@ -5049,7 +5100,7 @@ Node* InitializeNode::capture_store(StoreNode* st, intptr_t start, else ins_req(i, C->top()); // build a new edge } - Node* new_st = st->clone(); + Node* new_st = st->clone_with_adr_type(TypeRawPtr::BOTTOM); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); new_st->set_req(MemNode::Control, in(Control)); new_st->set_req(MemNode::Memory, prev_mem); diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp index 39b1ee88333..8efb5521e7c 100644 --- a/src/hotspot/share/opto/memnode.hpp +++ b/src/hotspot/share/opto/memnode.hpp @@ -26,6 +26,7 @@ #ifndef SHARE_OPTO_MEMNODE_HPP #define SHARE_OPTO_MEMNODE_HPP +#include "memory/allocation.hpp" #include "opto/multnode.hpp" #include "opto/node.hpp" #include "opto/opcodes.hpp" @@ -46,6 +47,8 @@ private: bool _unsafe_access; // Access of unsafe origin. uint8_t _barrier_data; // Bit field with barrier information + friend class AccessAnalyzer; + protected: #ifdef ASSERT const TypePtr* _adr_type; // What kind of memory is being addressed? 
@@ -170,6 +173,53 @@ public: static void dump_adr_type(const TypePtr* adr_type, outputStream* st); virtual void dump_spec(outputStream *st) const; #endif + + MemNode* clone_with_adr_type(const TypePtr* adr_type) const { + MemNode* new_node = clone()->as_Mem(); +#ifdef ASSERT + new_node->_adr_type = adr_type; +#endif + return new_node; + } +}; + +// Analyze a MemNode to try to prove that it is independent from other memory accesses +class AccessAnalyzer : StackObj { +private: + PhaseValues* const _phase; + MemNode* const _n; + Node* _base; + intptr_t _offset; + const int _memory_size; + bool _maybe_raw; + AllocateNode* _alloc; + const TypePtr* _adr_type; + int _alias_idx; + +public: + AccessAnalyzer(PhaseValues* phase, MemNode* n); + + // The result of deciding whether a memory node 'other' writes into the memory which '_n' + // observes. + class AccessIndependence { + public: + // Whether 'other' writes into the memory which '_n' observes. This value is conservative, that + // is, it is only true when it is provable that the memory accessed by the nodes is + // non-overlapping. + bool independent; + + // If 'independent' is true, this is the memory input of 'other' that corresponds to the memory + // location that '_n' observes. For example, if 'other' is a StoreNode, then 'mem' is its + // memory input, if 'other' is a MergeMemNode, then 'mem' is the memory input corresponding to + // the alias class of '_n'. + // If 'independent' is false, + // - 'mem' is non-nullptr if it seems that 'other' writes to the exact memory location '_n' + // observes. + // - 'mem' is nullptr otherwise. + Node* mem; + }; + + AccessIndependence detect_access_independence(Node* other) const; }; //------------------------------LoadNode--------------------------------------- @@ -216,6 +266,7 @@ protected: const Type* const _type; // What kind of value is loaded? 
virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const; + Node* can_see_stored_value_through_membars(Node* st, PhaseValues* phase) const; public: LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency) @@ -1271,6 +1322,13 @@ public: virtual int Opcode() const; }; +class MemBarStoreLoadNode : public MemBarNode { +public: + MemBarStoreLoadNode(Compile* C, int alias_idx, Node* precedent) + : MemBarNode(C, alias_idx, precedent) {} + virtual int Opcode() const; +}; + // Ordering between a volatile store and a following volatile load. // Requires multi-CPU visibility? class MemBarVolatileNode: public MemBarNode { @@ -1280,6 +1338,14 @@ public: virtual int Opcode() const; }; +// A full barrier blocks all loads and stores from moving across it +class MemBarFullNode : public MemBarNode { +public: + MemBarFullNode(Compile* C, int alias_idx, Node* precedent) + : MemBarNode(C, alias_idx, precedent) {} + virtual int Opcode() const; +}; + // Ordering within the same CPU. Used to order unsafe memory references // inside the compiler when we lack alias info. Not needed "outside" the // compiler because the CPU does all the ordering for us. diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp index 9bdaa3b9f34..d7022b5f7ec 100644 --- a/src/hotspot/share/opto/mulnode.cpp +++ b/src/hotspot/share/opto/mulnode.cpp @@ -651,31 +651,19 @@ Node* AndINode::Identity(PhaseGVN* phase) { return in(1); } - Node* in1 = in(1); - uint op = in1->Opcode(); - const TypeInt* t2 = phase->type(in(2))->isa_int(); - if (t2 && t2->is_con()) { - int con = t2->get_con(); - // Masking off high bits which are always zero is useless. 
- const TypeInt* t1 = phase->type(in(1))->isa_int(); - if (t1 != nullptr && t1->_lo >= 0) { - jint t1_support = right_n_bits(1 + log2i_graceful(t1->_hi)); - if ((t1_support & con) == t1_support) - return in1; - } - // Masking off the high bits of a unsigned-shift-right is not - // needed either. - if (op == Op_URShiftI) { - const TypeInt* t12 = phase->type(in1->in(2))->isa_int(); - if (t12 && t12->is_con()) { // Shift is by a constant - int shift = t12->get_con(); - shift &= BitsPerJavaInteger - 1; // semantics of Java shifts - int mask = max_juint >> shift; - if ((mask & con) == mask) // If AND is useless, skip it - return in1; - } - } + const TypeInt* t1 = phase->type(in(1))->is_int(); + const TypeInt* t2 = phase->type(in(2))->is_int(); + + if ((~t1->_bits._ones & ~t2->_bits._zeros) == 0) { + // All bits that might be 0 in in1 are known to be 0 in in2 + return in(2); } + + if ((~t2->_bits._ones & ~t1->_bits._zeros) == 0) { + // All bits that might be 0 in in2 are known to be 0 in in1 + return in(1); + } + return MulNode::Identity(phase); } @@ -779,32 +767,19 @@ Node* AndLNode::Identity(PhaseGVN* phase) { return in(1); } - Node *usr = in(1); - const TypeLong *t2 = phase->type( in(2) )->isa_long(); - if( t2 && t2->is_con() ) { - jlong con = t2->get_con(); - // Masking off high bits which are always zero is useless. - const TypeLong* t1 = phase->type( in(1) )->isa_long(); - if (t1 != nullptr && t1->_lo >= 0) { - int bit_count = log2i_graceful(t1->_hi) + 1; - jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count)); - if ((t1_support & con) == t1_support) - return usr; - } - uint lop = usr->Opcode(); - // Masking off the high bits of a unsigned-shift-right is not - // needed either. 
- if( lop == Op_URShiftL ) { - const TypeInt *t12 = phase->type( usr->in(2) )->isa_int(); - if( t12 && t12->is_con() ) { // Shift is by a constant - int shift = t12->get_con(); - shift &= BitsPerJavaLong - 1; // semantics of Java shifts - jlong mask = max_julong >> shift; - if( (mask&con) == mask ) // If AND is useless, skip it - return usr; - } - } + const TypeLong* t1 = phase->type(in(1))->is_long(); + const TypeLong* t2 = phase->type(in(2))->is_long(); + + if ((~t1->_bits._ones & ~t2->_bits._zeros) == 0) { + // All bits that might be 0 in in1 are known to be 0 in in2 + return in(2); } + + if ((~t2->_bits._ones & ~t1->_bits._zeros) == 0) { + // All bits that might be 0 in in2 are known to be 0 in in1 + return in(1); + } + return MulNode::Identity(phase); } @@ -1143,21 +1118,24 @@ const Type* LShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const { return t1; } - // Either input is BOTTOM ==> the result is BOTTOM - if ((t1 == TypeInteger::bottom(bt)) || (t2 == TypeInt::INT) || - (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM)) { + // If nothing is known about the shift amount then the result is BOTTOM + if (t2 == TypeInt::INT) { return TypeInteger::bottom(bt); } const TypeInteger* r1 = t1->is_integer(bt); // Handy access - const TypeInt* r2 = t2->is_int(); // Handy access + // Since the shift semantics in Java take into account only the bottom five + // bits for ints and the bottom six bits for longs, we can further constrain + // the range of values of the shift amount by ANDing with the right mask based + // on whether the type is int or long. 
+ const TypeInt* mask = TypeInt::make(bits_per_java_integer(bt) - 1); + const TypeInt* r2 = RangeInference::infer_and(t2->is_int(), mask); if (!r2->is_con()) { return TypeInteger::bottom(bt); } uint shift = r2->get_con(); - shift &= bits_per_java_integer(bt) - 1; // semantics of Java shifts // Shift by a multiple of 32/64 does nothing: if (shift == 0) { return t1; @@ -1166,22 +1144,20 @@ const Type* LShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const { // If the shift is a constant, shift the bounds of the type, // unless this could lead to an overflow. if (!r1->is_con()) { - jlong lo = r1->lo_as_long(), hi = r1->hi_as_long(); #ifdef ASSERT if (bt == T_INT) { + jlong lo = r1->lo_as_long(), hi = r1->hi_as_long(); jint lo_int = r1->is_int()->_lo, hi_int = r1->is_int()->_hi; assert((java_shift_right(java_shift_left(lo, shift, bt), shift, bt) == lo) == (((lo_int << shift) >> shift) == lo_int), "inconsistent"); assert((java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) == (((hi_int << shift) >> shift) == hi_int), "inconsistent"); } #endif - if (java_shift_right(java_shift_left(lo, shift, bt), shift, bt) == lo && - java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) { - // No overflow. The range shifts up cleanly. 
- return TypeInteger::make(java_shift_left(lo, shift, bt), - java_shift_left(hi, shift, bt), - MAX2(r1->_widen, r2->_widen), bt); + + if (bt == T_INT) { + return RangeInference::infer_lshift(r1->is_int(), shift); } - return TypeInteger::bottom(bt); + + return RangeInference::infer_lshift(r1->is_long(), shift); } return TypeInteger::make(java_shift_left(r1->get_con_as_long(bt), shift, bt), bt); @@ -1539,15 +1515,20 @@ Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) { Node *add = in(1); if (in1_op == Op_AddI) { Node *lshl = add->in(1); + Node *y = add->in(2); + if (lshl->Opcode() != Op_LShiftI) { + lshl = add->in(2); + y = add->in(1); + } // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized // negative constant (e.g. -1 vs 31) int lshl_con = 0; if (lshl->Opcode() == Op_LShiftI && const_shift_count(phase, lshl, &lshl_con) && (lshl_con & (BitsPerJavaInteger - 1)) == con) { - Node *y_z = phase->transform( new URShiftINode(add->in(2),in(2)) ); - Node *sum = phase->transform( new AddINode( lshl->in(1), y_z ) ); - return new AndINode( sum, phase->intcon(mask) ); + Node *y_z = phase->transform(new URShiftINode(y, in(2))); + Node *sum = phase->transform(new AddINode(lshl->in(1), y_z)); + return new AndINode(sum, phase->intcon(mask)); } } @@ -1699,13 +1680,18 @@ Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) { const TypeInt *t2 = phase->type(in(2))->isa_int(); if (add->Opcode() == Op_AddL) { Node *lshl = add->in(1); + Node *y = add->in(2); + if (lshl->Opcode() != Op_LShiftL) { + lshl = add->in(2); + y = add->in(1); + } // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized // negative constant (e.g. 
-1 vs 63) int lshl_con = 0; if (lshl->Opcode() == Op_LShiftL && const_shift_count(phase, lshl, &lshl_con) && (lshl_con & (BitsPerJavaLong - 1)) == con) { - Node* y_z = phase->transform(new URShiftLNode(add->in(2), in(2))); + Node* y_z = phase->transform(new URShiftLNode(y, in(2))); Node* sum = phase->transform(new AddLNode(lshl->in(1), y_z)); return new AndLNode(sum, phase->longcon(mask)); } diff --git a/src/hotspot/share/opto/narrowptrnode.cpp b/src/hotspot/share/opto/narrowptrnode.cpp index 7f86b8caecf..8b91bfaa944 100644 --- a/src/hotspot/share/opto/narrowptrnode.cpp +++ b/src/hotspot/share/opto/narrowptrnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,7 +102,7 @@ const Type* EncodePKlassNode::Value(PhaseGVN* phase) const { if (t == Type::TOP) return Type::TOP; assert (t != TypePtr::NULL_PTR, "null klass?"); - assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here"); + assert(t->isa_klassptr(), "only klass ptr here"); return t->make_narrowklass(); } diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp index cb5795a1250..3eafd97d7c1 100644 --- a/src/hotspot/share/opto/node.cpp +++ b/src/hotspot/share/opto/node.cpp @@ -38,6 +38,7 @@ #include "opto/matcher.hpp" #include "opto/node.hpp" #include "opto/opcodes.hpp" +#include "opto/reachability.hpp" #include "opto/regmask.hpp" #include "opto/rootnode.hpp" #include "opto/type.hpp" @@ -503,6 +504,9 @@ Node *Node::clone() const { if (is_expensive()) { C->add_expensive_node(n); } + if (is_ReachabilityFence()) { + C->add_reachability_fence(n->as_ReachabilityFence()); + } if (for_post_loop_opts_igvn()) { // Don't add cloned node to Compile::_for_post_loop_opts_igvn list automatically. 
// If it is applicable, it will happen anyway when the cloned node is registered with IGVN. @@ -622,6 +626,9 @@ void Node::destruct(PhaseValues* phase) { if (is_expensive()) { compile->remove_expensive_node(this); } + if (is_ReachabilityFence()) { + compile->remove_reachability_fence(as_ReachabilityFence()); + } if (is_OpaqueTemplateAssertionPredicate()) { compile->remove_template_assertion_predicate_opaque(as_OpaqueTemplateAssertionPredicate()); } @@ -2994,6 +3001,25 @@ bool Node::is_data_proj_of_pure_function(const Node* maybe_pure_function) const return Opcode() == Op_Proj && as_Proj()->_con == TypeFunc::Parms && maybe_pure_function->is_CallLeafPure(); } +//--------------------------has_non_debug_uses------------------------------ +// Checks whether the node has any non-debug uses or not. +bool Node::has_non_debug_uses() const { + for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { + Node* u = fast_out(i); + if (u->is_SafePoint()) { + if (u->is_Call() && u->as_Call()->has_non_debug_use(this)) { + return true; + } + // Non-call safepoints have only debug uses. + } else if (u->is_ReachabilityFence()) { + // Reachability fence is treated as debug use. 
+ } else { + return true; // everything else is conservatively treated as non-debug use + } + } + return false; // no non-debug uses found +} + //============================================================================= //------------------------------yank------------------------------------------- // Find and remove diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp index 46b89aa2c5f..8c6622e643e 100644 --- a/src/hotspot/share/opto/node.hpp +++ b/src/hotspot/share/opto/node.hpp @@ -168,6 +168,7 @@ class Pipeline; class PopulateIndexNode; class ProjNode; class RangeCheckNode; +class ReachabilityFenceNode; class ReductionNode; class RegMask; class RegionNode; @@ -452,6 +453,9 @@ public: // Check whether node has become unreachable bool is_unreachable(PhaseIterGVN &igvn) const; + // Does the node have any immediate non-debug uses? + bool has_non_debug_uses() const; + // Set a required input edge, also updates corresponding output edge void add_req( Node *n ); // Append a NEW required input void add_req( Node *n0, Node *n1 ) { @@ -824,6 +828,7 @@ public: DEFINE_CLASS_ID(Move, Node, 20) DEFINE_CLASS_ID(LShift, Node, 21) DEFINE_CLASS_ID(Neg, Node, 22) + DEFINE_CLASS_ID(ReachabilityFence, Node, 23) _max_classes = ClassMask_Neg }; @@ -1013,6 +1018,7 @@ public: DEFINE_CLASS_QUERY(PCTable) DEFINE_CLASS_QUERY(Phi) DEFINE_CLASS_QUERY(Proj) + DEFINE_CLASS_QUERY(ReachabilityFence) DEFINE_CLASS_QUERY(Reduction) DEFINE_CLASS_QUERY(Region) DEFINE_CLASS_QUERY(Root) @@ -1180,6 +1186,7 @@ public: return nullptr; } assert(!res->depends_only_on_test(), "the result must not depends_only_on_test"); + assert(Opcode() == res->Opcode(), "pinning must result in the same kind of node %s - %s", Name(), res->Name()); return res; } diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp index 136fc8ac864..a70620fac5b 100644 --- a/src/hotspot/share/opto/output.cpp +++ b/src/hotspot/share/opto/output.cpp @@ -1,5 +1,5 @@ /* - * Copyright 
(c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -363,15 +363,6 @@ bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const { DEBUG_ONLY(|| true))); } -bool PhaseOutput::need_register_stack_bang() const { - // Determine if we need to generate a register stack overflow check. - // This is only used on architectures which have split register - // and memory stacks. - // Bang if the method is not a stub function and has java calls - return (C->stub_function() == nullptr && C->has_java_calls()); -} - - // Compute the size of first NumberOfLoopInstrToAlign instructions at the top // of a loop. When aligning a loop we need to provide enough instructions // in cpu's fetch buffer to feed decoders. The loop alignment could be @@ -2923,17 +2914,30 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) { Node *m = b->get_node(i); - // Add precedence edge from following safepoint to use of derived pointer - if( last_safept_node != end_node && + if (last_safept_node != end_node && m != last_safept_node) { + bool need_safept_prec = false; + // Add precedence edge from following safepoint to use of derived pointer for (uint k = 1; k < m->req(); k++) { const Type *t = m->in(k)->bottom_type(); - if( t->isa_oop_ptr() && - t->is_ptr()->offset() != 0 ) { - last_safept_node->add_prec( m ); + if (t->isa_oop_ptr() && + t->is_ptr()->offset() != 0) { + need_safept_prec = true; break; } } + // A CheckCastPP whose input is still RawPtr must stay above the following safepoint. + // Otherwise post-regalloc block-local scheduling can leave a live raw oop at the safepoint. 
+ if (!need_safept_prec && m->is_Mach() && + m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) { + Node* def = m->in(1); + if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) { + need_safept_prec = true; + } + } + if (need_safept_prec) { + last_safept_node->add_prec(m); + } } if( n->jvms() ) { // Precedence edge from derived to safept diff --git a/src/hotspot/share/opto/output.hpp b/src/hotspot/share/opto/output.hpp index 432ad3638b2..5cca59ea0e4 100644 --- a/src/hotspot/share/opto/output.hpp +++ b/src/hotspot/share/opto/output.hpp @@ -109,7 +109,6 @@ public: // Convert Nodes to instruction bits and pass off to the VM void Output(); bool need_stack_bang(int frame_size_in_bytes) const; - bool need_register_stack_bang() const; void compute_loop_first_inst_sizes(); void install_code(ciMethod* target, diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index 397a7796f88..5118019fc31 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -356,6 +356,7 @@ class Parse : public GraphKit { bool _wrote_stable; // Did we write a @Stable field? bool _wrote_fields; // Did we write any field? Node* _alloc_with_final_or_stable; // An allocation node with final or @Stable field + Node* _stress_rf_hook; // StressReachabilityFences support // Variables which track Java semantics during bytecode parsing: @@ -474,8 +475,8 @@ class Parse : public GraphKit { void merge( int target_bci); // Same as plain merge, except that it allocates a new path number. void merge_new_path( int target_bci); - // Merge the current mapping into an exception handler. 
- void merge_exception(int target_bci); + // Push the exception oop and merge the current mapping into an exception handler. + void push_and_merge_exception(int target_bci, Node* ex_oop); // Helper: Merge the current mapping into the given basic block void merge_common(Block* target, int pnum); // Helper functions for merging individual cells. diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index 2f699650037..6a400631bff 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -104,9 +104,9 @@ void Parse::print_statistics() { Node *Parse::fetch_interpreter_state(int index, BasicType bt, Node* local_addrs) { - Node *mem = memory(Compile::AliasIdxRaw); - Node *adr = basic_plus_adr(top(), local_addrs, -index*wordSize); - Node *ctl = control(); + Node* mem = memory(Compile::AliasIdxRaw); + Node* adr = off_heap_plus_addr(local_addrs, -index*wordSize); + Node* ctl = control(); // Very similar to LoadNode::make, except we handle un-aligned longs and // doubles on Sparc. Intel can handle them just fine directly. @@ -120,7 +120,7 @@ Node *Parse::fetch_interpreter_state(int index, case T_DOUBLE: { // Since arguments are in reverse order, the argument address 'adr' // refers to the back half of the long/double. Recompute adr. - adr = basic_plus_adr(top(), local_addrs, -(index+1)*wordSize); + adr = off_heap_plus_addr(local_addrs, -(index+1)*wordSize); if (Matcher::misaligned_doubles_ok) { l = (bt == T_DOUBLE) ? 
(Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered) @@ -219,7 +219,7 @@ void Parse::load_interpreter_state(Node* osr_buf) { // Commute monitors from interpreter frame to compiler frame. assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr"); int mcnt = osr_block->flow()->monitor_count(); - Node *monitors_addr = basic_plus_adr(top(), osr_buf, (max_locals+mcnt*2-1)*wordSize); + Node* monitors_addr = off_heap_plus_addr(osr_buf, (max_locals+mcnt*2-1)*wordSize); for (index = 0; index < mcnt; index++) { // Make a BoxLockNode for the monitor. BoxLockNode* osr_box = new BoxLockNode(next_monitor()); @@ -270,7 +270,7 @@ void Parse::load_interpreter_state(Node* osr_buf) { } // Extract the needed locals from the interpreter frame. - Node *locals_addr = basic_plus_adr(top(), osr_buf, (max_locals-1)*wordSize); + Node* locals_addr = off_heap_plus_addr(osr_buf, (max_locals-1)*wordSize); // find all the locals that the interpreter thinks contain live oops const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci()); @@ -369,6 +369,15 @@ void Parse::load_interpreter_state(Node* osr_buf) { continue; } set_local(index, check_interpreter_type(l, type, bad_type_exit)); + if (StressReachabilityFences && type->isa_oopptr() != nullptr) { + // Keep all oop locals alive until the method returns as if there are + // reachability fences for them at the end of the method. 
+ Node* loc = local(index); + if (loc->bottom_type() != TypePtr::NULL_PTR) { + assert(loc->bottom_type()->isa_oopptr() != nullptr, "%s", Type::str(loc->bottom_type())); + _stress_rf_hook->add_req(loc); + } + } } for (index = 0; index < sp(); index++) { @@ -377,6 +386,15 @@ void Parse::load_interpreter_state(Node* osr_buf) { if (l->is_top()) continue; // nothing here const Type *type = osr_block->stack_type_at(index); set_stack(index, check_interpreter_type(l, type, bad_type_exit)); + if (StressReachabilityFences && type->isa_oopptr() != nullptr) { + // Keep all oops on stack alive until the method returns as if there are + // reachability fences for them at the end of the method. + Node* stk = stack(index); + if (stk->bottom_type() != TypePtr::NULL_PTR) { + assert(stk->bottom_type()->isa_oopptr() != nullptr, "%s", Type::str(stk->bottom_type())); + _stress_rf_hook->add_req(stk); + } + } } if (bad_type_exit->control()->req() > 1) { @@ -411,6 +429,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) _wrote_stable = false; _wrote_fields = false; _alloc_with_final_or_stable = nullptr; + _stress_rf_hook = (StressReachabilityFences ? new Node(1) : nullptr); _block = nullptr; _first_return = true; _replaced_nodes_for_exceptions = false; @@ -642,6 +661,11 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) if (log) log->done("parse nodes='%d' live='%d' memory='%zu'", C->unique(), C->live_nodes(), C->node_arena()->used()); + + if (StressReachabilityFences) { + _stress_rf_hook->destruct(&_gvn); + _stress_rf_hook = nullptr; + } } //---------------------------do_all_blocks------------------------------------- @@ -1194,6 +1218,14 @@ SafePointNode* Parse::create_entry_map() { return entry_map; } +//-----------------------is_auto_boxed_primitive------------------------------ +// Helper method to detect auto-boxed primitives (result of valueOf() call). 
+static bool is_auto_boxed_primitive(Node* n) { + return (n->is_Proj() && n->as_Proj()->_con == TypeFunc::Parms && + n->in(0)->is_CallJava() && + n->in(0)->as_CallJava()->method()->is_boxing_method()); +} + //-----------------------------do_method_entry-------------------------------- // Emit any code needed in the pseudo-block before BCI zero. // The main thing to do is lock the receiver of a synchronized method. @@ -1207,6 +1239,19 @@ void Parse::do_method_entry() { make_dtrace_method_entry(method()); } + if (StressReachabilityFences) { + // Keep all oop arguments alive until the method returns as if there are + // reachability fences for them at the end of the method. + int max_locals = jvms()->loc_size(); + for (int idx = 0; idx < max_locals; idx++) { + Node* loc = local(idx); + if (loc->bottom_type()->isa_oopptr() != nullptr && + !is_auto_boxed_primitive(loc)) { // ignore auto-boxed primitives + _stress_rf_hook->add_req(loc); + } + } + } + #ifdef ASSERT // Narrow receiver type when it is too broad for the method being parsed. if (!method()->is_static()) { @@ -1651,9 +1696,14 @@ void Parse::merge_new_path(int target_bci) { } //-------------------------merge_exception------------------------------------- -// Merge the current mapping into the basic block starting at bci -// The ex_oop must be pushed on the stack, unlike throw_to_exit. -void Parse::merge_exception(int target_bci) { +// Push the given ex_oop onto the stack, then merge the current mapping into +// the basic block starting at target_bci. +void Parse::push_and_merge_exception(int target_bci, Node* ex_oop) { + // Add the safepoint before trimming the stack and pushing the exception oop. + // We could add the safepoint after, but then the bci would also need to be + // advanced to target_bci first, so the stack state matches. 
+ maybe_add_safepoint(target_bci); + push_ex_oop(ex_oop); // Push exception oop for handler #ifdef ASSERT if (target_bci <= bci()) { C->set_exception_backedge(); @@ -2127,7 +2177,7 @@ void Parse::call_register_finalizer() { Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS)); - Node* access_flags_addr = basic_plus_adr(top(), klass, in_bytes(Klass::misc_flags_offset())); + Node* access_flags_addr = off_heap_plus_addr(klass, in_bytes(Klass::misc_flags_offset())); Node* access_flags = make_load(nullptr, access_flags_addr, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered); Node* mask = _gvn.transform(new AndINode(access_flags, intcon(KlassFlags::_misc_has_finalizer))); @@ -2192,6 +2242,15 @@ void Parse::return_current(Node* value) { call_register_finalizer(); } + if (StressReachabilityFences) { + // Insert reachability fences for all oop arguments at the end of the method. + for (uint i = 1; i < _stress_rf_hook->req(); i++) { + Node* referent = _stress_rf_hook->in(i); + assert(referent->bottom_type()->isa_oopptr(), "%s", Type::str(referent->bottom_type())); + insert_reachability_fence(referent); + } + } + // Do not set_parse_bci, so that return goo is credited to the return insn. 
set_bci(InvocationEntryBci); if (method()->is_synchronized()) { @@ -2273,9 +2332,9 @@ void Parse::add_safepoint() { sfpnt->init_req(TypeFunc::FramePtr , top() ); // Create a node for the polling address - Node *polladr; - Node *thread = _gvn.transform(new ThreadLocalNode()); - Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(JavaThread::polling_page_offset()))); + Node* polladr; + Node* thread = _gvn.transform(new ThreadLocalNode()); + Node* polling_page_load_addr = _gvn.transform(off_heap_plus_addr(thread, in_bytes(JavaThread::polling_page_offset()))); polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered); sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr)); diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index eac2b3e863a..d732e6f04e1 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -966,12 +966,28 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, _max_switch_depth = 0; _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1; } + SwitchRange* orig_lo = lo; + SwitchRange* orig_hi = hi; #endif - assert(lo <= hi, "must be a non-empty set of ranges"); - if (lo == hi) { - jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0); - } else { + // The lower-range processing is done iteratively to avoid O(N) stack depth + // when the profiling-based pivot repeatedly selects mid==lo (JDK-8366138). + // The upper-range processing remains recursive but is only reached for + // balanced splits, bounding its depth to O(log N). 
+ // Termination: every iteration either exits or strictly decreases hi-lo: + // lo == mid && mid < hi, increments lo + // lo < mid <= hi, sets hi = mid - 1. + for (int depth = switch_depth;; depth++) { +#ifndef PRODUCT + _max_switch_depth = MAX2(depth, _max_switch_depth); +#endif + + assert(lo <= hi, "must be a non-empty set of ranges"); + if (lo == hi) { + jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0); + break; + } + assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges"); assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges"); @@ -981,7 +997,12 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, float total_cnt = sum_of_cnts(lo, hi); int nr = hi - lo + 1; - if (UseSwitchProfiling) { + // With total_cnt==0 the profiling pivot degenerates to mid==lo + // (0 >= 0/2), producing a linear chain of If nodes instead of a + // balanced tree. A balanced tree is strictly better here: all paths + // are cold, so a balanced split gives fewer comparisons at runtime + // and avoids pathological memory usage in the optimizer. + if (UseSwitchProfiling && total_cnt > 0) { // Don't keep the binary search tree balanced: pick up mid point // that split frequencies in half. float cnt = 0; @@ -1002,7 +1023,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, assert(nr != 2 || mid == hi, "should pick higher of 2"); assert(nr != 3 || mid == hi-1, "should pick middle of 3"); } - + assert(mid != nullptr, "mid must be set"); Node *test_val = _gvn.intcon(mid == lo ? 
mid->hi() : mid->lo()); @@ -1025,7 +1046,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, Node *iffalse = _gvn.transform( new IfFalseNode(iff_lt) ); { PreserveJVMState pjvms(this); set_control(iffalse); - jump_switch_ranges(key_val, mid+1, hi, switch_depth+1); + jump_switch_ranges(key_val, mid+1, hi, depth+1); } set_control(iftrue); } @@ -1043,21 +1064,22 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) ); { PreserveJVMState pjvms(this); set_control(iftrue); - jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1); + jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, depth+1); } set_control(iffalse); } } - // in any case, process the lower range + // Process the lower range: iterate instead of recursing. if (mid == lo) { if (mid->is_singleton()) { - jump_switch_ranges(key_val, lo+1, hi, switch_depth+1); + lo++; } else { jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0); + break; } } else { - jump_switch_ranges(key_val, lo, mid-1, switch_depth+1); + hi = mid - 1; } } @@ -1072,23 +1094,22 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, } #ifndef PRODUCT - _max_switch_depth = MAX2(switch_depth, _max_switch_depth); if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) { SwitchRange* r; int nsing = 0; - for( r = lo; r <= hi; r++ ) { + for (r = orig_lo; r <= orig_hi; r++) { if( r->is_singleton() ) nsing++; } tty->print(">>> "); _method->print_short_name(); tty->print_cr(" switch decision tree"); tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d", - (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth); + (int) (orig_hi-orig_lo+1), nsing, _max_switch_depth, _est_switch_depth); if (_max_switch_depth > _est_switch_depth) { tty->print_cr("******** BAD SWITCH DEPTH ********"); } tty->print(" "); - for( r = lo; r <= hi; r++ ) { + for (r = 
orig_lo; r <= orig_hi; r++) { r->print(); } tty->cr(); @@ -1736,6 +1757,12 @@ static bool match_type_check(PhaseGVN& gvn, // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) // or the narrowOop equivalent. (*obj) = extract_obj_from_klass_load(&gvn, val); + // Some klass comparisons are not directly in the form + // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]), + // e.g. Bool(CmpP(CastPP(LoadKlass(...)), ConP(klass)), [eq]). + // These patterns with nullable klasses arise from example from + // load_array_klass_from_mirror. + if (*obj == nullptr) { return false; } (*cast_type) = tcon->isa_klassptr()->as_instance_type(); return true; // found } @@ -1776,8 +1803,8 @@ static bool match_type_check(PhaseGVN& gvn, assert(idx == 1 || idx == 2, ""); Node* vcon = val->in(idx); - assert(val->find_edge(con) > 0, ""); if ((btest == BoolTest::eq && vcon == con) || (btest == BoolTest::ne && vcon != con)) { + assert(val->find_edge(con) > 0, "mismatch"); SubTypeCheckNode* sub = b1->in(1)->as_SubTypeCheck(); Node* obj_or_subklass = sub->in(SubTypeCheckNode::ObjOrSubKlass); Node* superklass = sub->in(SubTypeCheckNode::SuperKlass); @@ -1806,17 +1833,21 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest, &obj, &cast_type)) { assert(obj != nullptr && cast_type != nullptr, "missing type check info"); const Type* obj_type = _gvn.type(obj); - const TypeOopPtr* tboth = obj_type->join_speculative(cast_type)->isa_oopptr(); - if (tboth != nullptr && tboth != obj_type && tboth->higher_equal(obj_type)) { + const Type* tboth = obj_type->filter_speculative(cast_type); + assert(tboth->higher_equal(obj_type) && tboth->higher_equal(cast_type), "sanity"); + if (tboth == Type::TOP && KillPathsReachableByDeadTypeNode) { + // Let dead type node cleaning logic prune effectively dead path for us. + // CheckCastPP::Value() == TOP and it will trigger the cleanup during GVN. 
+ // Don't materialize the cast when cleanup is disabled, because + // it kills data and control leaving IR in broken state. + tboth = cast_type; + } + if (tboth != Type::TOP && tboth != obj_type) { int obj_in_map = map()->find_edge(obj); - JVMState* jvms = this->jvms(); if (obj_in_map >= 0 && - (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { + (jvms()->is_loc(obj_in_map) || jvms()->is_stk(obj_in_map))) { TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); - const Type* tcc = ccast->as_Type()->type(); - assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); - // Delay transform() call to allow recovery of pre-cast value - // at the control merge. + // Delay transform() call to allow recovery of pre-cast value at the control merge. _gvn.set_type_bottom(ccast); record_for_igvn(ccast); // Here's the payoff. diff --git a/src/hotspot/share/opto/parseHelper.cpp b/src/hotspot/share/opto/parseHelper.cpp index 2e8fca68a8d..232f5d6c89a 100644 --- a/src/hotspot/share/opto/parseHelper.cpp +++ b/src/hotspot/share/opto/parseHelper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -220,7 +220,7 @@ void Parse::array_store_check() { // Extract the array element class int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset()); - Node* p2 = basic_plus_adr(top(), array_klass, element_klass_offset); + Node* p2 = off_heap_plus_addr(array_klass, element_klass_offset); Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p2, tak)); assert(array_klass->is_Con() == a_e_klass->is_Con() || StressReflectiveCode, "a constant array type must come with a constant element type"); diff --git a/src/hotspot/share/opto/phase.cpp b/src/hotspot/share/opto/phase.cpp index 5603033ce69..3f1866990e2 100644 --- a/src/hotspot/share/opto/phase.cpp +++ b/src/hotspot/share/opto/phase.cpp @@ -90,6 +90,9 @@ void Phase::print_timers() { tty->print_cr (" Prune Useless: %7.3f s", timers[_t_vector_pru].seconds()); tty->print_cr (" Renumber Live: %7.3f s", timers[_t_renumberLive].seconds()); tty->print_cr (" IdealLoop: %7.3f s", timers[_t_idealLoop].seconds()); + tty->print_cr (" ReachabilityFence: %7.3f s", timers[_t_reachability].seconds()); + tty->print_cr (" Optimize: %7.3f s", timers[_t_reachability_optimize].seconds()); + tty->print_cr (" Expand: %7.3f s", timers[_t_reachability_expand].seconds()); tty->print_cr (" AutoVectorize: %7.3f s", timers[_t_autoVectorize].seconds()); tty->print_cr (" IdealLoop Verify: %7.3f s", timers[_t_idealLoopVerify].seconds()); tty->print_cr (" Cond Const Prop: %7.3f s", timers[_t_ccp].seconds()); diff --git a/src/hotspot/share/opto/phase.hpp b/src/hotspot/share/opto/phase.hpp index 6700df6ec17..5bd3c34f15f 100644 --- a/src/hotspot/share/opto/phase.hpp +++ b/src/hotspot/share/opto/phase.hpp @@ -85,6 +85,9 @@ public: f( _t_vector_pru, "vector_pru") \ f( _t_renumberLive, "") \ f( _t_idealLoop, "idealLoop") \ + f( _t_reachability, "reachabilityFence") \ + f( _t_reachability_optimize, "reachabilityFence_optimize") \ + f( 
_t_reachability_expand, "reachabilityFence_expand") \ f( _t_autoVectorize, "autoVectorize") \ f( _t_idealLoopVerify, "idealLoopVerify") \ f( _t_ccp, "ccp") \ diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp index 3cbbc114778..a4d6a6c33d0 100644 --- a/src/hotspot/share/opto/phaseX.cpp +++ b/src/hotspot/share/opto/phaseX.cpp @@ -360,6 +360,16 @@ NodeHash::~NodeHash() { } #endif +// Add users of 'n' that match 'predicate' to worklist +template +static void add_users_to_worklist_if(Unique_Node_List& worklist, const Node* n, Predicate predicate) { + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node* u = n->fast_out(i); + if (predicate(u)) { + worklist.push(u); + } + } +} //============================================================================= //------------------------------PhaseRemoveUseless----------------------------- @@ -565,7 +575,7 @@ PhaseValues::~PhaseValues() { _table.dump(); // Statistics for value progress and efficiency if( PrintCompilation && Verbose && WizardMode ) { - tty->print("\n%sValues: %d nodes ---> %d/%d (%d)", + tty->print("\n%sValues: %d nodes ---> " UINT64_FORMAT "/%d (%d)", is_IterGVN() ? "Iter" : " ", C->unique(), made_progress(), made_transforms(), made_new_values()); if( made_transforms() != 0 ) { tty->print_cr(" ratio %f", made_progress()/(float)made_transforms() ); @@ -721,14 +731,14 @@ Node* PhaseGVN::transform(Node* n) { } if (t->singleton() && !k->is_Con()) { - NOT_PRODUCT(set_progress();) + set_progress(); return makecon(t); // Turn into a constant } // Now check for Identities i = k->Identity(this); // Look for a nearby replacement if (i != k) { // Found? Return replacement! 
- NOT_PRODUCT(set_progress();) + set_progress(); return i; } @@ -736,7 +746,7 @@ Node* PhaseGVN::transform(Node* n) { i = hash_find_insert(k); // Insert if new if (i && (i != k)) { // Return the pre-existing node - NOT_PRODUCT(set_progress();) + set_progress(); return i; } @@ -899,9 +909,9 @@ void PhaseIterGVN::verify_step(Node* n) { } } -void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) { +void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype, bool progress) { const Type* newtype = type_or_null(n); - if (nn != n || oldtype != newtype) { + if (progress) { C->print_method(PHASE_AFTER_ITER_GVN_STEP, 5, n); } if (TraceIterativeGVN) { @@ -967,7 +977,7 @@ void PhaseIterGVN::init_verifyPhaseIterGVN() { #endif } -void PhaseIterGVN::verify_PhaseIterGVN() { +void PhaseIterGVN::verify_PhaseIterGVN(bool deep_revisit_converged) { #ifdef ASSERT // Verify nodes with changed inputs. Unique_Node_List* modified_list = C->modified_nodes(); @@ -1000,7 +1010,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() { } } - verify_optimize(); + verify_optimize(deep_revisit_converged); #endif } #endif /* PRODUCT */ @@ -1030,8 +1040,155 @@ void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) { } #endif /* ASSERT */ -void PhaseIterGVN::optimize() { - DEBUG_ONLY(uint num_processed = 0;) +bool PhaseIterGVN::needs_deep_revisit(const Node* n) const { + // LoadNode::Value() -> can_see_stored_value() walks up through many memory + // nodes. LoadNode::Ideal() -> find_previous_store() also walks up to 50 + // nodes through stores and arraycopy nodes. + if (n->is_Load()) { + return true; + } + // CmpPNode::sub() -> detect_ptr_independence() -> all_controls_dominate() + // walks CFG dominator relationships extensively. This only triggers when + // both inputs are oop pointers (subnode.cpp:984). 
+ if (n->Opcode() == Op_CmpP) { + const Type* t1 = type_or_null(n->in(1)); + const Type* t2 = type_or_null(n->in(2)); + return t1 != nullptr && t1->isa_oopptr() && + t2 != nullptr && t2->isa_oopptr(); + } + // IfNode::Ideal() -> search_identical() walks up the CFG dominator tree. + // RangeCheckNode::Ideal() scans up to ~999 nodes up the chain. + // CountedLoopEndNode/LongCountedLoopEndNode::Ideal() via simple_subsuming + // looks for dominating test that subsumes the current test. + switch (n->Opcode()) { + case Op_If: + case Op_RangeCheck: + case Op_CountedLoopEnd: + case Op_LongCountedLoopEnd: + return true; + default: + break; + } + return false; +} + +bool PhaseIterGVN::drain_worklist() { + uint loop_count = 1; + const int max_live_nodes_increase_per_iteration = NodeLimitFudgeFactor * 3; + while (_worklist.size() != 0) { + if (C->check_node_count(max_live_nodes_increase_per_iteration, "Out of nodes")) { + C->print_method(PHASE_AFTER_ITER_GVN, 3); + return true; + } + Node* n = _worklist.pop(); + if (loop_count >= K * C->live_nodes()) { + DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::drain_worklist");) + C->record_method_not_compilable("infinite loop in PhaseIterGVN::drain_worklist"); + C->print_method(PHASE_AFTER_ITER_GVN, 3); + return true; + } + DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, _num_processed++);) + if (n->outcnt() != 0) { + NOT_PRODUCT(const Type* oldtype = type_or_null(n)); + // Do the transformation + DEBUG_ONLY(int live_nodes_before = C->live_nodes();) + NOT_PRODUCT(uint progress_before = made_progress();) + Node* nn = transform_old(n); + NOT_PRODUCT(bool progress = (made_progress() - progress_before) > 0;) + DEBUG_ONLY(int live_nodes_after = C->live_nodes();) + // Ensure we did not increase the live node count with more than + // max_live_nodes_increase_per_iteration during the call to transform_old. 
+ DEBUG_ONLY(int increase = live_nodes_after - live_nodes_before;) + assert(increase < max_live_nodes_increase_per_iteration, + "excessive live node increase in single iteration of IGVN: %d " + "(should be at most %d)", + increase, max_live_nodes_increase_per_iteration); + NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype, progress);) + } else if (!n->is_top()) { + remove_dead_node(n, NodeOrigin::Graph); + } + loop_count++; + } + return false; +} + +void PhaseIterGVN::push_deep_revisit_candidates() { + ResourceMark rm; + Unique_Node_List all_nodes; + all_nodes.push(C->root()); + for (uint j = 0; j < all_nodes.size(); j++) { + Node* n = all_nodes.at(j); + if (needs_deep_revisit(n)) { + _worklist.push(n); + } + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + all_nodes.push(n->fast_out(i)); + } + } +} + +bool PhaseIterGVN::deep_revisit() { + // Re-process nodes that inspect the graph deeply. After the main worklist drains, walk + // the graph to find all live deep-inspection nodes and push them to the worklist + // for re-evaluation. If any produce changes, drain the worklist again. + // Repeat until stable. This mirrors PhaseCCP::analyze()'s revisit loop. + const uint max_deep_revisit_rounds = 10; // typically converges in <2 rounds + uint round = 0; + for (; round < max_deep_revisit_rounds; round++) { + push_deep_revisit_candidates(); + if (_worklist.size() == 0) { + break; // No deep-inspection nodes to revisit, done. 
+ } + +#ifndef PRODUCT + uint candidates = _worklist.size(); + uint n_if = 0; uint n_rc = 0; uint n_load = 0; uint n_cmpp = 0; uint n_cle = 0; uint n_lcle = 0; + if (TraceIterativeGVN) { + for (uint i = 0; i < _worklist.size(); i++) { + Node* n = _worklist.at(i); + switch (n->Opcode()) { + case Op_If: n_if++; break; + case Op_RangeCheck: n_rc++; break; + case Op_CountedLoopEnd: n_cle++; break; + case Op_LongCountedLoopEnd: n_lcle++; break; + case Op_CmpP: n_cmpp++; break; + default: if (n->is_Load()) n_load++; break; + } + } + } +#endif + + // Convergence: if the drain does not make progress (no Ideal, Value, Identity or GVN changes), + // we are at a fixed point. We use made_progress() rather than live_nodes because live_nodes + // misses non-structural changes like a LoadNode dropping its control input. + uint progress_before = made_progress(); + if (drain_worklist()) { + return false; + } + uint progress = made_progress() - progress_before; + +#ifndef PRODUCT + if (TraceIterativeGVN) { + tty->print("deep_revisit round %u: %u candidates (If=%u RC=%u Load=%u CmpP=%u CLE=%u LCLE=%u), progress=%u (%s)", + round, candidates, n_if, n_rc, n_load, n_cmpp, n_cle, n_lcle, progress, progress != 0 ? "changed" : "converged"); + if (C->method() != nullptr) { + tty->print(", "); + C->method()->print_short_name(tty); + } + tty->cr(); + } +#endif + + if (progress == 0) { + break; + } + } + return round < max_deep_revisit_rounds; +} + +void PhaseIterGVN::optimize(bool deep) { + bool deep_revisit_converged = false; + DEBUG_ONLY(_num_processed = 0;) NOT_PRODUCT(init_verifyPhaseIterGVN();) NOT_PRODUCT(C->reset_igv_phase_iter(PHASE_AFTER_ITER_GVN_STEP);) C->print_method(PHASE_BEFORE_ITER_GVN, 3); @@ -1039,54 +1196,24 @@ void PhaseIterGVN::optimize() { shuffle_worklist(); } - // The node count check in the loop below (check_node_count) assumes that we - // increase the live node count with at most - // max_live_nodes_increase_per_iteration in between checks. 
If this - // assumption does not hold, there is a risk that we exceed the max node - // limit in between checks and trigger an assert during node creation. - const int max_live_nodes_increase_per_iteration = NodeLimitFudgeFactor * 3; - - uint loop_count = 0; - // Pull from worklist and transform the node. If the node has changed, - // update edge info and put uses on worklist. - while (_worklist.size() > 0) { - if (C->check_node_count(max_live_nodes_increase_per_iteration, "Out of nodes")) { - C->print_method(PHASE_AFTER_ITER_GVN, 3); - return; - } - Node* n = _worklist.pop(); - if (loop_count >= K * C->live_nodes()) { - DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::optimize");) - C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize"); - C->print_method(PHASE_AFTER_ITER_GVN, 3); - return; - } - DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);) - if (n->outcnt() != 0) { - NOT_PRODUCT(const Type* oldtype = type_or_null(n)); - // Do the transformation - DEBUG_ONLY(int live_nodes_before = C->live_nodes();) - Node* nn = transform_old(n); - DEBUG_ONLY(int live_nodes_after = C->live_nodes();) - // Ensure we did not increase the live node count with more than - // max_live_nodes_increase_per_iteration during the call to transform_old - DEBUG_ONLY(int increase = live_nodes_after - live_nodes_before;) - assert(increase < max_live_nodes_increase_per_iteration, - "excessive live node increase in single iteration of IGVN: %d " - "(should be at most %d)", - increase, max_live_nodes_increase_per_iteration); - NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);) - } else if (!n->is_top()) { - remove_dead_node(n); - } - loop_count++; + // Pull from worklist and transform the node. 
+ if (drain_worklist()) { + return; } - NOT_PRODUCT(verify_PhaseIterGVN();) + + if (deep && UseDeepIGVNRevisit) { + deep_revisit_converged = deep_revisit(); + if (C->failing()) { + return; + } + } + + NOT_PRODUCT(verify_PhaseIterGVN(deep_revisit_converged);) C->print_method(PHASE_AFTER_ITER_GVN, 3); } #ifdef ASSERT -void PhaseIterGVN::verify_optimize() { +void PhaseIterGVN::verify_optimize(bool deep_revisit_converged) { assert(_worklist.size() == 0, "igvn worklist must be empty before verify"); if (is_verify_Value() || @@ -1104,11 +1231,11 @@ void PhaseIterGVN::verify_optimize() { // in PhaseIterGVN::add_users_to_worklist to update it again or add an exception // in the verification methods below if that is not possible for some reason (like Load nodes). if (is_verify_Value()) { - verify_Value_for(n); + verify_Value_for(n, deep_revisit_converged /* strict */); } if (is_verify_Ideal()) { - verify_Ideal_for(n, false); - verify_Ideal_for(n, true); + verify_Ideal_for(n, false /* can_reshape */, deep_revisit_converged); + verify_Ideal_for(n, true /* can_reshape */, deep_revisit_converged); } if (is_verify_Identity()) { verify_Identity_for(n); @@ -1230,52 +1357,15 @@ void PhaseIterGVN::verify_Value_for(const Node* n, bool strict) { // Check that all Ideal optimizations that could be done were done. // Asserts if it found missed optimization opportunities or encountered unexpected changes, and // returns normally otherwise (no missed optimization, or skipped verification). -void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) { +void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape, bool deep_revisit_converged) { + if (!deep_revisit_converged && needs_deep_revisit(n)) { + return; + } + // First, we check a list of exceptions, where we skip verification, // because there are known cases where Ideal can optimize after IGVN. // Some may be expected and cannot be fixed, and others should be fixed. 
switch (n->Opcode()) { - // RangeCheckNode::Ideal looks up the chain for about 999 nodes - // (see "Range-Check scan limit"). So, it is possible that something - // is optimized in that input subgraph, and the RangeCheck was not - // added to the worklist because it would be too expensive to walk - // down the graph for 1000 nodes and put all on the worklist. - // - // Found with: - // java -XX:VerifyIterativeGVN=0100 -Xbatch --version - case Op_RangeCheck: - return; - - // IfNode::Ideal does: - // Node* prev_dom = search_identical(dist, igvn); - // which means we seach up the CFG, traversing at most up to a distance. - // If anything happens rather far away from the If, we may not put the If - // back on the worklist. - // - // Found with: - // java -XX:VerifyIterativeGVN=0100 -Xcomp --version - case Op_If: - return; - - // IfNode::simple_subsuming - // Looks for dominating test that subsumes the current test. - // Notification could be difficult because of larger distance. - // - // Found with: - // runtime/exceptionMsgs/ArrayIndexOutOfBoundsException/ArrayIndexOutOfBoundsExceptionTest.java#id1 - // -XX:VerifyIterativeGVN=1110 - case Op_CountedLoopEnd: - return; - - // LongCountedLoopEndNode::Ideal - // Probably same issue as above. - // - // Found with: - // compiler/predicates/assertion/TestAssertionPredicates.java#NoLoopPredicationXbatch - // -XX:StressLongCountedLoop=2000000 -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110 - case Op_LongCountedLoopEnd: - return; - // RegionNode::Ideal does "Skip around the useless IF diamond". // 245 IfTrue === 244 // 258 If === 245 257 @@ -1747,22 +1837,6 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) { return; } - if (n->is_Load()) { - // LoadNode::Ideal uses tries to find an earlier memory state, and - // checks can_see_stored_value for it. - // - // Investigate why this was not already done during IGVN. - // A similar issue happens with Identity. 
- // - // There seem to be other cases where loads go up some steps, like - // LoadNode::Ideal going up 10x steps to find dominating load. - // - // Found with: - // test/hotspot/jtreg/compiler/arraycopy/TestCloneAccess.java - // -XX:VerifyIterativeGVN=1110 - return; - } - if (n->is_Store()) { // StoreNode::Ideal can do this: // // Capture an unaliased, unconditional, simple store into an initializer. @@ -1847,8 +1921,16 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) { return; } - // The number of nodes shoud not increase. - uint old_unique = C->unique(); + // Ideal should not make progress if it returns nullptr. + // We use made_progress() rather than unique() or live_nodes() because some + // Ideal implementations speculatively create nodes and kill them before + // returning nullptr (e.g. split_if clones a Cmp to check is_canonical). + // unique() is a high-water mark that is not decremented by remove_dead_node, + // so it would cause false-positives. live_nodes() accounts for dead nodes but can + // decrease when Ideal removes existing nodes as side effects. + // made_progress() precisely tracks meaningful transforms, and speculative + // work killed via NodeOrigin::Speculative does not increment it. + uint old_progress = made_progress(); // The hash of a node should not change, this would indicate different inputs uint old_hash = n->hash(); // Remove 'n' from hash table in case it gets modified. We want to avoid @@ -1860,14 +1942,15 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) { Node* i = n->Ideal(this, can_reshape); // If there was no new Idealization, we are probably happy. if (i == nullptr) { - if (old_unique < C->unique()) { + uint progress = made_progress() - old_progress; + if (progress != 0) { stringStream ss; // Print as a block without tty lock. 
ss.cr(); - ss.print_cr("Ideal optimization did not make progress but created new unused nodes."); - ss.print_cr(" old_unique = %d, unique = %d", old_unique, C->unique()); + ss.print_cr("Ideal optimization did not make progress but had side effects."); + ss.print_cr(" %u transforms made progress", progress); n->dump_bfs(1, nullptr, "", &ss); tty->print_cr("%s", ss.as_string()); - assert(false, "Unexpected new unused nodes from applying Ideal optimization on %s", n->Name()); + assert(false, "Unexpected side effects from applying Ideal optimization on %s", n->Name()); } if (old_hash != n->hash()) { @@ -2040,7 +2123,12 @@ void PhaseIterGVN::verify_Identity_for(Node* n) { if (n->is_Vector()) { // Found with tier1-3. Not investigated yet. - // The observed issue was with AndVNode::Identity + // The observed issue was with AndVNode::Identity and + // VectorStoreMaskNode::Identity (see JDK-8370863). + // + // Found with: + // compiler/vectorapi/VectorStoreMaskIdentityTest.java + // -XX:CompileThreshold=100 -XX:-TieredCompilation -XX:VerifyIterativeGVN=1110 return; } @@ -2142,6 +2230,9 @@ Node *PhaseIterGVN::transform_old(Node* n) { #endif DEBUG_ONLY(uint loop_count = 1;) + if (i != nullptr) { + set_progress(); + } while (i != nullptr) { #ifdef ASSERT if (loop_count >= K + C->live_nodes()) { @@ -2187,10 +2278,8 @@ Node *PhaseIterGVN::transform_old(Node* n) { // cache Value. Later requests for the local phase->type of this Node can // use the cached Value instead of suffering with 'bottom_type'. 
if (type_or_null(k) != t) { -#ifndef PRODUCT - inc_new_values(); + NOT_PRODUCT(inc_new_values();) set_progress(); -#endif set_type(k, t); // If k is a TypeNode, capture any more-precise type permanently into Node k->raise_bottom_type(t); @@ -2199,7 +2288,7 @@ Node *PhaseIterGVN::transform_old(Node* n) { } // If 'k' computes a constant, replace it with a constant if (t->singleton() && !k->is_Con()) { - NOT_PRODUCT(set_progress();) + set_progress(); Node* con = makecon(t); // Make a constant add_users_to_worklist(k); subsume_node(k, con); // Everybody using k now uses con @@ -2209,7 +2298,7 @@ Node *PhaseIterGVN::transform_old(Node* n) { // Now check for Identities i = k->Identity(this); // Look for a nearby replacement if (i != k) { // Found? Return replacement! - NOT_PRODUCT(set_progress();) + set_progress(); add_users_to_worklist(k); subsume_node(k, i); // Everybody using k now uses i return i; @@ -2219,7 +2308,7 @@ Node *PhaseIterGVN::transform_old(Node* n) { i = hash_find_insert(k); // Check for pre-existing node if (i && (i != k)) { // Return the pre-existing node if it isn't dead - NOT_PRODUCT(set_progress();) + set_progress(); add_users_to_worklist(k); subsume_node(k, i); // Everybody using k now uses i return i; @@ -2238,7 +2327,7 @@ const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type, //------------------------------remove_globally_dead_node---------------------- // Kill a globally dead Node. All uses are also globally dead and are // aggressively trimmed. 
-void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { +void PhaseIterGVN::remove_globally_dead_node(Node* dead, NodeOrigin origin) { enum DeleteProgress { PROCESS_INPUTS, PROCESS_OUTPUTS @@ -2255,11 +2344,13 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { uint progress_state = stack.index(); assert(dead != C->root(), "killing root, eh?"); assert(!dead->is_top(), "add check for top when pushing"); - NOT_PRODUCT( set_progress(); ) if (progress_state == PROCESS_INPUTS) { // After following inputs, continue to outputs stack.set_index(PROCESS_OUTPUTS); if (!dead->is_Con()) { // Don't kill cons but uses + if (origin != NodeOrigin::Speculative) { + set_progress(); + } bool recurse = false; // Remove from hash table _table.hash_delete( dead ); @@ -2298,12 +2389,7 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { // A Load that directly follows an InitializeNode is // going away. The Stores that follow are candidates // again to be captured by the InitializeNode. - for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) { - Node *n = in->fast_out(j); - if (n->is_Store()) { - _worklist.push(n); - } - } + add_users_to_worklist_if(_worklist, in, [](Node* n) { return n->is_Store(); }); } } // if (in != nullptr && in != C->top()) } // for (uint i = 0; i < dead->req(); i++) @@ -2374,7 +2460,7 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) { // Smash all inputs to 'old', isolating him completely Node *temp = new Node(1); temp->init_req(0,nn); // Add a use to nn to prevent him from dying - remove_dead_node( old ); + remove_dead_node(old, NodeOrigin::Graph); temp->del_req(0); // Yank bogus edge if (nn != nullptr && nn->outcnt() == 0) { _worklist.push(nn); @@ -2559,41 +2645,49 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_ // If changed LShift inputs, check RShift/URShift users for // "(X << C) >> C" sign-ext and "(X << C) >>> C" zero-ext optimizations. 
if (use_op == Op_LShiftI || use_op == Op_LShiftL) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL || - u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { + return u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL || + u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL; + }); } // If changed LShift inputs, check And users for shift and mask (And) operation if (use_op == Op_LShiftI || use_op == Op_LShiftL) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_AndI || u->Opcode() == Op_AndL) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { + return u->Opcode() == Op_AndI || u->Opcode() == Op_AndL; + }); } // If changed AddI/SubI inputs, check CmpU for range check optimization. if (use_op == Op_AddI || use_op == Op_SubI) { + add_users_to_worklist_if(worklist, use, [](Node* u) { + return u->Opcode() == Op_CmpU; + }); + } + // If changed AddI/AddL inputs, check URShift users for + // "((X << z) + Y) >>> z" optimization in URShift{I,L}Node::Ideal. + if (use_op == Op_AddI || use_op == Op_AddL) { + add_users_to_worklist_if(worklist, use, [](Node* u) { + return u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL; + }); + } + // If changed LShiftI/LShiftL inputs, check AddI/AddL users for their + // URShiftI/URShiftL users for "((x << z) + y) >>> z" optimization opportunity + // (see URShiftINode::Ideal). Handles the case where the LShift input changes. 
+ if (use_op == Op_LShiftI || use_op == Op_LShiftL) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) { - worklist.push(u); + Node* add = use->fast_out(i2); + if (add->Opcode() == Op_AddI || add->Opcode() == Op_AddL) { + add_users_to_worklist_if(worklist, add, [](Node* u) { + return u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL; + }); } } } // If changed AndI/AndL inputs, check RShift/URShift users for "(x & mask) >> shift" optimization opportunity if (use_op == Op_AndI || use_op == Op_AndL) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL || - u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { + return u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL || + u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL; + }); } // Check for redundant conversion patterns: // ConvD2L->ConvL2D->ConvD2L @@ -2606,35 +2700,22 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_ use_op == Op_ConvI2F || use_op == Op_ConvL2F || use_op == Op_ConvF2I) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if ((use_op == Op_ConvL2D && u->Opcode() == Op_ConvD2L) || - (use_op == Op_ConvI2F && u->Opcode() == Op_ConvF2I) || - (use_op == Op_ConvL2F && u->Opcode() == Op_ConvF2L) || - (use_op == Op_ConvF2I && u->Opcode() == Op_ConvI2F)) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [=](Node* u) { + return (use_op == Op_ConvL2D && u->Opcode() == Op_ConvD2L) || + (use_op == Op_ConvI2F && u->Opcode() == Op_ConvF2I) || + (use_op == Op_ConvL2F && u->Opcode() == Op_ConvF2L) || + (use_op == Op_ConvF2I && u->Opcode() == Op_ConvI2F); + }); } // 
ConvD2F::Ideal matches ConvD2F(SqrtD(ConvF2D(x))) => SqrtF(x). // Notify ConvD2F users of SqrtD when any input of the SqrtD changes. if (use_op == Op_SqrtD) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_ConvD2F) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_ConvD2F; }); } // ConvF2HF::Ideal matches ConvF2HF(binopF(ConvHF2F(...))) => FP16BinOp(...). // Notify ConvF2HF users of float binary ops when any input changes. if (Float16NodeFactory::is_float32_binary_oper(use_op)) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_ConvF2HF) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_ConvF2HF; }); } // If changed AddP inputs: // - check Stores for loop invariant, and @@ -2642,33 +2723,21 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_ // address expression flattening. 
if (use_op == Op_AddP) { bool offset_changed = n == use->in(AddPNode::Offset); - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->is_Mem()) { - worklist.push(u); - } else if (offset_changed && u->is_AddP() && u->in(AddPNode::Offset)->is_Con()) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [=](Node* u) { + return u->is_Mem() || + (offset_changed && u->is_AddP() && u->in(AddPNode::Offset)->is_Con()); + }); } // Check for "abs(0-x)" into "abs(x)" conversion if (use->is_Sub()) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_AbsD || u->Opcode() == Op_AbsF || - u->Opcode() == Op_AbsL || u->Opcode() == Op_AbsI) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { + return u->Opcode() == Op_AbsD || u->Opcode() == Op_AbsF || + u->Opcode() == Op_AbsL || u->Opcode() == Op_AbsI; + }); } // Check for Max/Min(A, Max/Min(B, C)) where A == B or A == C if (use->is_MinMax()) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->is_MinMax()) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { return u->is_MinMax(); }); } auto enqueue_init_mem_projs = [&](ProjNode* proj) { add_users_to_worklist0(proj, worklist); @@ -2707,12 +2776,9 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_ if (u->Opcode() == Op_LoadP && ut->isa_instptr()) { if (has_load_barrier_nodes) { // Search for load barriers behind the load - for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) { - Node* b = u->fast_out(i3); - if (bs->is_gc_barrier_node(b)) { - worklist.push(b); - } - } + add_users_to_worklist_if(worklist, u, [&](Node* b) { + return bs->is_gc_barrier_node(b); + }); } worklist.push(u); } @@ -2725,17 +2791,17 @@ void 
PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_ worklist.push(cmp); } } + // VectorMaskToLongNode::Ideal_MaskAll looks through VectorStoreMask + // to fold constant masks. + if (use_op == Op_VectorStoreMask) { + add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_VectorMaskToLong; }); + } // From CastX2PNode::Ideal // CastX2P(AddX(x, y)) // CastX2P(SubX(x, y)) if (use->Opcode() == Op_AddX || use->Opcode() == Op_SubX) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == Op_CastX2P) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_CastX2P; }); } /* AndNode has a special handling when one of the operands is a LShiftNode: @@ -2770,12 +2836,7 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_ // e.g., (x - y) + y -> x; x + (y - x) -> y. if (use_op == Op_SubI || use_op == Op_SubL) { const int add_op = (use_op == Op_SubI) ? Op_AddI : Op_AddL; - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->Opcode() == add_op) { - worklist.push(u); - } - } + add_users_to_worklist_if(worklist, use, [=](Node* u) { return u->Opcode() == add_op; }); } } @@ -2960,6 +3021,10 @@ void PhaseCCP::dump_type_and_node(const Node* n, const Type* t) { } #endif +bool PhaseCCP::not_bottom_type(Node* n) const { + return n->bottom_type() != type(n); +} + // We need to propagate the type change of 'n' to all its uses. Depending on the kind of node, additional nodes // (grandchildren or even further down) need to be revisited as their types could also be improved as a result // of the new type of 'n'. Push these nodes to the worklist. 
@@ -2972,7 +3037,7 @@ void PhaseCCP::push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) } void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const { - if (n->bottom_type() != type(n)) { + if (not_bottom_type(n)) { worklist.push(n); } } @@ -2995,9 +3060,9 @@ void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const No // We must recheck Phis too if use is a Region. void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const { if (use->is_Region()) { - for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) { - push_if_not_bottom_type(worklist, use->fast_out(i)); - } + add_users_to_worklist_if(worklist, use, [&](Node* u) { + return not_bottom_type(u); + }); } } @@ -3024,14 +3089,11 @@ void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) { void PhaseCCP::push_cmpu(Unique_Node_List& worklist, const Node* use) const { uint use_op = use->Opcode(); if (use_op == Op_AddI || use_op == Op_SubI) { - for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) { - Node* cmpu = use->fast_out(i); - const uint cmpu_opcode = cmpu->Opcode(); - if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) { - // Got a CmpU or CmpU3 which might need the new type information from node n. - push_if_not_bottom_type(worklist, cmpu); - } - } + // Got a CmpU or CmpU3 which might need the new type information from node n. 
+ add_users_to_worklist_if(worklist, use, [&](Node* u) { + uint op = u->Opcode(); + return (op == Op_CmpU || op == Op_CmpU3) && not_bottom_type(u); + }); } } @@ -3120,12 +3182,9 @@ void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const { } void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) { - for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) { - Node* barrier_node = use->fast_out(i); - if (barrier_set->is_gc_barrier_node(barrier_node)) { - worklist.push(barrier_node); - } - } + add_users_to_worklist_if(worklist, use, [&](Node* u) { + return barrier_set->is_gc_barrier_node(u); + }); } // AndI/L::Value() optimizes patterns similar to (v << 2) & 3, or CON & 3 to zero if they are bitwise disjoint. @@ -3161,12 +3220,9 @@ void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const No void PhaseCCP::push_cast_ii(Unique_Node_List& worklist, const Node* parent, const Node* use) const { if (use->Opcode() == Op_CmpI && use->in(2) == parent) { Node* other_cmp_input = use->in(1); - for (DUIterator_Fast imax, i = other_cmp_input->fast_outs(imax); i < imax; i++) { - Node* cast_ii = other_cmp_input->fast_out(i); - if (cast_ii->is_CastII()) { - push_if_not_bottom_type(worklist, cast_ii); - } - } + add_users_to_worklist_if(worklist, other_cmp_input, [&](Node* u) { + return u->is_CastII() && not_bottom_type(u); + }); } } diff --git a/src/hotspot/share/opto/phaseX.hpp b/src/hotspot/share/opto/phaseX.hpp index cd38f37ccf5..014d16f92f6 100644 --- a/src/hotspot/share/opto/phaseX.hpp +++ b/src/hotspot/share/opto/phaseX.hpp @@ -187,8 +187,8 @@ public: class PhaseTransform : public Phase { public: PhaseTransform(PhaseNumber pnum) : Phase(pnum) { -#ifndef PRODUCT clear_progress(); +#ifndef PRODUCT clear_transforms(); set_allow_progress(true); #endif @@ -201,12 +201,31 @@ public: // true if CFG node d dominates CFG node n virtual bool is_dominator(Node *d, Node *n) { 
fatal("unimplemented for this pass"); return false; }; -#ifndef PRODUCT - uint _count_progress; // For profiling, count transforms that make progress - void set_progress() { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); } - void clear_progress() { _count_progress = 0; } - uint made_progress() const { return _count_progress; } + uint64_t _count_progress; // Count transforms that make progress + void set_progress() { ++_count_progress; assert(allow_progress(), "No progress allowed during verification"); } + void clear_progress() { _count_progress = 0; } + uint64_t made_progress() const { return _count_progress; } + // RAII guard for speculative transforms. Restores _count_progress in the destructor + // unless commit() is called, so that abandoned speculative work does not count as progress. + // In case multiple nodes are created and only some are speculative, commit() should still be called. + class SpeculativeProgressGuard { + PhaseTransform* _phase; + uint64_t _saved_progress; + bool _committed; + public: + SpeculativeProgressGuard(PhaseTransform* phase) : + _phase(phase), _saved_progress(phase->made_progress()), _committed(false) {} + ~SpeculativeProgressGuard() { + if (!_committed) { + _phase->_count_progress = _saved_progress; + } + } + + void commit() { _committed = true; } + }; + +#ifndef PRODUCT uint _count_transforms; // For profiling, count transforms performed void set_transforms() { ++_count_transforms; } void clear_transforms() { _count_transforms = 0; } @@ -446,10 +465,30 @@ class PhaseIterGVN : public PhaseGVN { private: bool _delay_transform; // When true simply register the node when calling transform // instead of actually optimizing it + DEBUG_ONLY(uint _num_processed;) // Running count for trace_PhaseIterGVN_verbose // Idealize old Node 'n' with respect to its inputs and its value virtual Node *transform_old( Node *a_node ); + // Drain the IGVN worklist: process nodes until the worklist is empty. 
+ // Returns true if compilation was aborted (node limit or infinite loop), + // false on normal completion. + bool drain_worklist(); + + // Walk all live nodes and push deep-inspection candidates to _worklist. + void push_deep_revisit_candidates(); + + // After the main worklist drains, re-process deep-inspection nodes to + // catch optimization opportunities from far-away changes. Repeats until + // convergence (no progress made) or max rounds reached. + // Returns true if converged. + bool deep_revisit(); + + // Returns true for nodes that inspect the graph beyond their direct + // inputs, and therefore may miss optimization opportunities when + // changes happen far away. + bool needs_deep_revisit(const Node* n) const; + // Subsume users of node 'old' into node 'nn' void subsume_node( Node *old, Node *nn ); @@ -493,20 +532,25 @@ public: // Given def-use info and an initial worklist, apply Node::Ideal, // Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU // and dominator info to a fixed point. - void optimize(); + // When deep is true, after the main worklist drains, re-process + // nodes that inspect the graph deeply (Load, CmpP, If, RangeCheck, + // CountedLoopEnd, LongCountedLoopEnd) to catch optimization opportunities + // from changes far away that the normal notification mechanism misses. 
+ void optimize(bool deep = false); + #ifdef ASSERT - void verify_optimize(); + void verify_optimize(bool deep_revisit_converged); void verify_Value_for(const Node* n, bool strict = false); - void verify_Ideal_for(Node* n, bool can_reshape); + void verify_Ideal_for(Node* n, bool can_reshape, bool deep_revisit_converged); void verify_Identity_for(Node* n); void verify_node_invariants_for(const Node* n); void verify_empty_worklist(Node* n); #endif #ifndef PRODUCT - void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type); + void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type, bool progress); void init_verifyPhaseIterGVN(); - void verify_PhaseIterGVN(); + void verify_PhaseIterGVN(bool deep_revisit_converged); #endif #ifdef ASSERT @@ -522,15 +566,21 @@ public: // It is significant only for debugging and profiling. Node* register_new_node_with_optimizer(Node* n, Node* orig = nullptr); - // Kill a globally dead Node. All uses are also globally dead and are + // Origin of a dead node, describing why it is dying. + // Speculative: a temporarily created node that was never part of the graph + // (e.g., a speculative clone in split_if to test constant foldability). + // Its death does not count as progress for convergence tracking. + enum class NodeOrigin { Graph, Speculative }; + + // Kill a globally dead Node. All uses are also globally dead and are // aggressively trimmed. - void remove_globally_dead_node( Node *dead ); + void remove_globally_dead_node(Node* dead, NodeOrigin origin); // Kill all inputs to a dead node, recursively making more dead nodes. // The Node must be dead locally, i.e., have no uses. 
- void remove_dead_node( Node *dead ) { + void remove_dead_node(Node* dead, NodeOrigin origin) { assert(dead->outcnt() == 0 && !dead->is_top(), "node must be dead"); - remove_globally_dead_node(dead); + remove_globally_dead_node(dead, origin); } // Add users of 'n' to worklist @@ -652,6 +702,7 @@ class PhaseCCP : public PhaseIterGVN { Node* fetch_next_node(Unique_Node_List& worklist); static void dump_type_and_node(const Node* n, const Type* t) PRODUCT_RETURN; + bool not_bottom_type(Node* n) const; void push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) const; void push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const; void push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const; diff --git a/src/hotspot/share/opto/phasetype.hpp b/src/hotspot/share/opto/phasetype.hpp index f388dc6cdc6..2b599ace03a 100644 --- a/src/hotspot/share/opto/phasetype.hpp +++ b/src/hotspot/share/opto/phasetype.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_OPTO_PHASETYPE_HPP #define SHARE_OPTO_PHASETYPE_HPP +#include "memory/allocation.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/stringUtils.hpp" @@ -105,6 +106,7 @@ flags(PHASEIDEALLOOP1, "PhaseIdealLoop 1") \ flags(PHASEIDEALLOOP2, "PhaseIdealLoop 2") \ flags(PHASEIDEALLOOP3, "PhaseIdealLoop 3") \ + flags(EXPAND_REACHABILITY_FENCES, "Expand Reachability Fences") \ flags(AUTO_VECTORIZATION1_BEFORE_APPLY, "AutoVectorization 1, before Apply") \ flags(AUTO_VECTORIZATION3_AFTER_ADJUST_LIMIT, "AutoVectorization 2, after Adjusting Pre-loop Limit") \ flags(AUTO_VECTORIZATION4_AFTER_SPECULATIVE_RUNTIME_CHECKS, "AutoVectorization 3, after Adding Speculative Runtime Checks") \ diff --git a/src/hotspot/share/opto/rangeinference.hpp b/src/hotspot/share/opto/rangeinference.hpp index 66ea741a2da..7c0f12f6ef7 100644 --- a/src/hotspot/share/opto/rangeinference.hpp +++ b/src/hotspot/share/opto/rangeinference.hpp @@ -28,6 +28,7 @@ #include "cppstdlib/limits.hpp" #include "cppstdlib/type_traits.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/intn_t.hpp" class outputStream; class Type; @@ -407,6 +408,40 @@ public: return TypeIntMirror, U>::make(TypeIntPrototype, U>{{lo, hi}, {ulo, uhi}, {zeros, ones}}); }); } + + // Compute `known_bits` by shifting known bits of `t1` left and setting the + // low `shift` bits to zeros. Also update the signed and unsigned ranges when + // the shift operation does not cause an overflow. The caller is responsible + // for normalizing the shift amount (i.e. masking with 31 for ints or 63 for + // longs). 
+ template + static CTP infer_lshift(CTP t1, int masked_shift) { + assert(masked_shift >= 0 && + masked_shift < HotSpotNumerics::type_width>(), + "shift is out of range"); + + U pattern = (U(1) << masked_shift) - U(1); + U known_one_bits = t1->_bits._ones << masked_shift; + U known_zero_bits = (t1->_bits._zeros << masked_shift) | pattern; + KnownBits> known_bits{known_zero_bits, known_one_bits}; + + S shifted_slo = S(U(t1->_lo) << masked_shift); + S shifted_shi = S(U(t1->_hi) << masked_shift); + bool s_overflow = (shifted_slo >> masked_shift) != t1->_lo || + (shifted_shi >> masked_shift) != t1->_hi; + S slo = s_overflow ? std::numeric_limits>::min() : shifted_slo; + S shi = s_overflow ? std::numeric_limits>::max() : shifted_shi; + + U shifted_ulo = t1->_ulo << masked_shift; + U shifted_uhi = t1->_uhi << masked_shift; + bool u_overflow = (shifted_ulo >> masked_shift) != t1->_ulo || + (shifted_uhi >> masked_shift) != t1->_uhi; + U ulo = u_overflow ? std::numeric_limits>::min() : shifted_ulo; + U uhi = u_overflow ? std::numeric_limits>::max() : shifted_uhi; + + TypeIntPrototype, U> proto{{slo, shi}, {ulo, uhi}, known_bits}; + return CT::make(proto, t1->_widen); + } }; #endif // SHARE_OPTO_RANGEINFERENCE_HPP diff --git a/src/hotspot/share/opto/reachability.cpp b/src/hotspot/share/opto/reachability.cpp new file mode 100644 index 00000000000..b45b340ab85 --- /dev/null +++ b/src/hotspot/share/opto/reachability.cpp @@ -0,0 +1,512 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "opto/c2_MacroAssembler.hpp" +#include "opto/callnode.hpp" +#include "opto/compile.hpp" +#include "opto/loopnode.hpp" +#include "opto/phaseX.hpp" +#include "opto/reachability.hpp" +#include "opto/regalloc.hpp" +#include "opto/runtime.hpp" +#include "utilities/pair.hpp" + +/* + * java.lang.ref.Reference::reachabilityFence support. + * + * Reachability Fence (RF) ensures that the given object (referent) remains strongly reachable + * regardless of any optimizing transformations the virtual machine may perform that might otherwise + * allow the object to become unreachable. + * + * RFs are intended to be used in performance-critical code, so the primary goal for C2 support is + * to reduce their runtime overhead as much as possible. + * + * Reference::reachabilityFence() calls are intrinsified into ReachabilityFence CFG nodes. RF node keeps + * its referent alive, so the referent's location is recorded at every safepoint (in its oop map) which + * interferes with referent's live range. + * + * It is tempting to directly attach referents to interfering safepoints right from the beginning, but it + * doesn't play well with some optimizations C2 does (e.g., during loop-invariant code motion a safepoint + * can become interfering once a load is hoisted). 
+ * + * Instead, reachability representation transitions through multiple phases: + * (0) initial set of RFs is materialized during parsing (as a result of + * Reference.reachabilityFence intrinsification); + * (1) optimization pass during loop opts eliminates redundant RF nodes and + * moves the ones with loop-invariant referents outside (after) loops; + * (2) after loop opts are over, RF nodes are eliminated and their referents are transferred to + * safepoint nodes (appended as edges after debug info); + * (3) during final graph reshaping, referent edges are removed from safepoints and materialized as RF nodes + * attached to their safepoint node (closely following it in CFG graph). + * + * Some implementation considerations. + * + * (a) It looks attractive to get rid of RF nodes early and transfer to safepoint-attached representation, + * but it is not correct until loop opts are done. + * + * Live ranges of values are routinely extended during loop opts. And it can break the invariant that + * all interfering safepoints contain the referent in their oop map. (If an interfering safepoint doesn't + * keep the referent alive, then it becomes possible for the referent to be prematurely GCed.) + * + * compiler/c2/TestReachabilityFence.java demonstrates a situation where a load is hoisted out of a loop thus + * extending the live range of the value it produces beyond the safepoint on loop-back edge. + * + * After loop opts are over, it becomes possible to reliably enumerate all interfering safepoints and + * to ensure that the referent is present in their oop maps. Current assumption is that after loop opts the IR graph + * is stable enough, so relative order of memory operations and safepoints is preserved and only safepoints between + * a referent and it's uses are taken into account. A more conservative analysis can be employed -- any safepoint dominated + * by a referent is treated as interfering with it -- if it turns out that the assumption doesn't hold. 
+ * + * (b) RF nodes may interfere with Register Allocator (RA). If a safepoint is pruned during macro expansion, + * it can make some RF nodes redundant, but we don't have information about their relations anymore to detect that. + * Redundant RF node unnecessarily extends referent's live range and increases register pressure. + * + * Hence, we eliminate RF nodes and transfer their referents to corresponding safepoints (phase #2). + * When safepoints are pruned, corresponding reachability edges also go away. + * + * (c) Unfortunately, it's not straightforward to stay with safepoint-attached representation till the very end, + * because information about derived oops is attached to safepoints in a similar way. So, for now RFs are + * rematerialized at safepoints before RA (phase #3). + */ + +bool ReachabilityFenceNode::is_redundant(PhaseGVN& gvn) { + const Type* referent_t = gvn.type(referent()); + if (referent_t == TypePtr::NULL_PTR) { + return true; // no-op fence: null referent + } + if (!OptimizeReachabilityFences) { + return false; // keep reachability fence nodes intact + } + if (!PreserveReachabilityFencesOnConstants && referent_t->singleton()) { + return true; // no-op fence: constants are strongly reachable + } + return false; +} + +Node* ReachabilityFenceNode::Ideal(PhaseGVN* phase, bool can_reshape) { + return (remove_dead_region(phase, can_reshape) ? this : nullptr); +} + +Node* ReachabilityFenceNode::Identity(PhaseGVN* phase) { + if (is_redundant(*phase)) { + return in(0); + } + return this; +} + +// Turn the RF node into a no-op by setting its referent to null. +// Subsequent IGVN pass removes cleared nodes. 
+bool ReachabilityFenceNode::clear_referent(PhaseIterGVN& phase) { + if (phase.type(referent()) == TypePtr::NULL_PTR) { + return false; + } else { + phase.replace_input_of(this, 1, phase.makecon(TypePtr::NULL_PTR)); + return true; + } +} + +#ifndef PRODUCT +static void rf_desc(outputStream* st, const ReachabilityFenceNode* rf, PhaseRegAlloc* ra) { + char buf[50]; + ra->dump_register(rf->referent(), buf, sizeof(buf)); + st->print("reachability fence [%s]", buf); +} + +void ReachabilityFenceNode::format(PhaseRegAlloc* ra, outputStream* st) const { + rf_desc(st, this, ra); +} + +void ReachabilityFenceNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra) const { + ResourceMark rm; + stringStream ss; + rf_desc(&ss, this, ra); + const char* desc = masm->code_string(ss.freeze()); + masm->block_comment(desc); +} +#endif + +// Detect safepoint nodes which are important for reachability tracking purposes. +// Some SafePoint nodes can't affect referent's reachability in any noticeable way and +// can be safely ignored during the analysis. 
+static bool is_interfering_sfpt_candidate(SafePointNode* sfpt) { + if (sfpt->jvms() == nullptr) { + return false; // not a real safepoint + } else if (sfpt->is_CallStaticJava() && sfpt->as_CallStaticJava()->is_uncommon_trap()) { + return false; // uncommon traps are exit points + } + return true; // a full-blown safepoint can interfere with a reachability fence and its referent +} + +void PhaseIdealLoop::insert_rf(Node* ctrl, Node* referent) { + IdealLoopTree* lpt = get_loop(ctrl); + Node* ctrl_end = ctrl->unique_ctrl_out(); + + auto new_rf = new ReachabilityFenceNode(C, ctrl, referent); + + register_control(new_rf, lpt, ctrl); + set_idom(new_rf, ctrl, dom_depth(ctrl) + 1); + lpt->register_reachability_fence(new_rf); + + igvn().rehash_node_delayed(ctrl_end); + ctrl_end->replace_edge(ctrl, new_rf); + + if (idom(ctrl_end) == ctrl) { + set_idom(ctrl_end, new_rf, dom_depth(new_rf) + 1); + } else { + assert(ctrl_end->is_Region(), ""); + } +} + +void PhaseIdealLoop::replace_rf(Node* old_node, Node* new_node) { + assert(old_node->is_ReachabilityFence() || + (old_node->is_Proj() && old_node->in(0)->is_ReachabilityFence()), + "%s", NodeClassNames[old_node->Opcode()]); + + IdealLoopTree* lpt = get_loop(old_node); + lpt->_body.yank(old_node); + assert(lpt->_reachability_fences != nullptr, "missing"); + assert(lpt->_reachability_fences->contains(old_node), "missing"); + lpt->_reachability_fences->yank(old_node); + replace_node_and_forward_ctrl(old_node, new_node); +} + +void PhaseIdealLoop::remove_rf(ReachabilityFenceNode* rf) { + Node* rf_ctrl_in = rf->in(0); + Node* referent = rf->referent(); + if (rf->clear_referent(igvn()) && referent->outcnt() == 0) { + remove_dead_data_node(referent); + } + replace_rf(rf, rf_ctrl_in); +} + +//====================================================================== +//---------------------------- Phase 1 --------------------------------- +// Optimization pass over reachability fences during loop opts. 
+// Moves RFs with loop-invariant referents out of the loop. +bool PhaseIdealLoop::optimize_reachability_fences() { + Compile::TracePhase tp(_t_reachability_optimize); + + assert(OptimizeReachabilityFences, "required"); + + // ResourceMark rm; // NB! not safe because insert_rf may trigger _idom reallocation + Unique_Node_List redundant_rfs; + typedef Pair LoopExit; + GrowableArray worklist; + + for (int i = 0; i < C->reachability_fences_count(); i++) { + ReachabilityFenceNode* rf = C->reachability_fence(i); + assert(!rf->is_redundant(igvn()), "required"); + // Move RFs out of counted loops when possible. + IdealLoopTree* lpt = get_loop(rf); + Node* referent = rf->referent(); + if (lpt->is_invariant(referent)) { + IfFalseNode* unique_loop_exit = lpt->unique_loop_exit_proj_or_null(); + if (unique_loop_exit != nullptr) { + // Skip over an outer strip-mined loop. + if (!lpt->is_root()) { + IdealLoopTree* outer_lpt = lpt->_parent; + if (outer_lpt->head()->is_OuterStripMinedLoop()) { + if (outer_lpt->is_invariant(referent)) { + IfNode* outer_loop_end = outer_lpt->head()->as_OuterStripMinedLoop()->outer_loop_end(); + if (outer_loop_end != nullptr && outer_loop_end->false_proj_or_null() != nullptr) { + unique_loop_exit = outer_loop_end->false_proj_or_null(); + } + } else { + // An attempt to insert an RF node inside outer strip-mined loop breaks + // its IR invariants and manifests as assertion failures. 
+ assert(false, "not loop invariant in outer strip-mined loop"); + continue; // skip + } + } + } + + LoopExit p(referent, unique_loop_exit); + worklist.push(p); + redundant_rfs.push(rf); + +#ifndef PRODUCT + if (TraceLoopOpts) { + IdealLoopTree* loop = get_loop(unique_loop_exit->in(0)); + tty->print_cr("ReachabilityFence: N%d: %s N%d/N%d -> loop exit N%d (%s N%d/N%d)", + rf->_idx, lpt->head()->Name(), lpt->head()->_idx, lpt->tail()->_idx, + unique_loop_exit->_idx, + loop->head()->Name(), loop->head()->_idx, loop->tail()->_idx); + } +#endif // !PRODUCT + } + } + } + + // Populate RFs outside loops. + while (worklist.is_nonempty()) { + LoopExit p = worklist.pop(); + Node* referent = p.first; + Node* ctrl_out = p.second; + insert_rf(ctrl_out, referent); + } + + // Eliminate redundant RFs. + bool progress = (redundant_rfs.size() > 0); + while (redundant_rfs.size() > 0) { + remove_rf(redundant_rfs.pop()->as_ReachabilityFence()); + } + + return progress; +} + +//====================================================================== +//---------------------------- Phase 2 --------------------------------- + +// Linearly traverse CFG upwards starting at ctrl_start until first merge point. +// All encountered safepoints are recorded in safepoints list, except +// the ones filtered out by is_interfering_sfpt_candidate(). 
+static void enumerate_interfering_sfpts_linear_traversal(Node* ctrl_start, Node_Stack& stack, VectorSet& visited, + Node_List& interfering_sfpts) { + for (Node* ctrl = ctrl_start; ctrl != nullptr; ctrl = ctrl->in(0)) { + assert(ctrl->is_CFG(), ""); + if (visited.test_set(ctrl->_idx)) { + return; + } else { + if (ctrl->is_Region()) { + stack.push(ctrl, 1); + return; // stop at merge points + } else if (ctrl->is_SafePoint() && is_interfering_sfpt_candidate(ctrl->as_SafePoint())) { + assert(!ctrl->is_CallStaticJava() || !ctrl->as_CallStaticJava()->is_uncommon_trap(), + "uncommon traps should not be enumerated"); + interfering_sfpts.push(ctrl); + } + } + } +} + +// Enumerate all safepoints which are reachable from the RF to its referent through CFG. +// Start at RF node and traverse CFG upwards until referent's control node is reached. +static void enumerate_interfering_sfpts(ReachabilityFenceNode* rf, PhaseIdealLoop* phase, + Node_Stack& stack, VectorSet& visited, + Node_List& interfering_sfpts) { + assert(stack.is_empty(), "required"); + assert(visited.is_empty(), "required"); + + Node* referent = rf->referent(); + Node* referent_ctrl = phase->get_ctrl(referent); + assert(phase->is_dominator(referent_ctrl, rf), "sanity"); + + visited.set(referent_ctrl->_idx); // end point + enumerate_interfering_sfpts_linear_traversal(rf, stack, visited, interfering_sfpts); // starting point in CFG + while (stack.is_nonempty()) { + Node* cur = stack.node(); + uint idx = stack.index(); + + assert(cur != nullptr, ""); + assert(cur->is_Region(), "%s", NodeClassNames[cur->Opcode()]); + assert(phase->is_dominator(referent_ctrl, cur), ""); + assert(idx > 0 && idx <= cur->req(), "%d %d", idx, cur->req()); + + if (idx < cur->req()) { + stack.set_index(idx + 1); + enumerate_interfering_sfpts_linear_traversal(cur->in(idx), stack, visited, interfering_sfpts); + } else { + stack.pop(); + } + } + // Reset temporary structures to their initial state. 
+ assert(stack.is_empty(), "required"); + visited.clear(); +} + +// Start offset for reachability info on a safepoint node. +static uint rf_base_offset(SafePointNode* sfpt) { + return sfpt->jvms()->debug_end(); +} + +static bool dominates_another_rf(ReachabilityFenceNode* rf, PhaseIdealLoop* phase) { + assert(!rf->is_redundant(phase->igvn()), ""); + + for (int i = 0; i < phase->C->reachability_fences_count(); i++) { + ReachabilityFenceNode* other_rf = phase->C->reachability_fence(i); + assert(other_rf->outcnt() > 0, "dead node"); + if (rf != other_rf && rf->referent()->eqv_uncast(other_rf->referent()) && + phase->is_dominator(rf, other_rf)) { + return true; // dominates another reachability fence with the same referent + } + } + return false; +} + +// Phase 2: migrate reachability info to safepoints. +// All RFs are replaced with edges from corresponding referents to interfering safepoints. +// Interfering safepoints are safepoint nodes which are reachable from the RF to its referent through CFG. +bool PhaseIdealLoop::expand_reachability_fences() { + Compile::TracePhase tp(_t_reachability_expand); + + assert(OptimizeReachabilityFences, "required"); + assert(C->post_loop_opts_phase(), "required"); + DEBUG_ONLY( int no_of_constant_rfs = 0; ) + + ResourceMark rm; + Unique_Node_List for_removal; + typedef Pair ReachabilityEdge; + GrowableArray reachability_edges; + { + // Reuse temporary structures to avoid allocating them for every single RF node. 
+ Node_List sfpt_worklist; + Node_Stack stack(0); + VectorSet visited; + + for (int i = 0; i < C->reachability_fences_count(); i++) { + ReachabilityFenceNode* rf = C->reachability_fence(i); + assert(!rf->is_redundant(igvn()), "missed"); + if (PreserveReachabilityFencesOnConstants) { + const Type* referent_t = igvn().type(rf->referent()); + assert(referent_t != TypePtr::NULL_PTR, "redundant rf"); + bool is_constant_rf = referent_t->singleton(); + if (is_constant_rf) { + DEBUG_ONLY( no_of_constant_rfs += 1; ) + continue; // leave RFs on constants intact + } + } + if (dominates_another_rf(rf, this)) { + // Redundant fence: dominates another RF with the same referent. + // RF is redundant for some referent oop when the referent has another RF which + // keeps it alive across the RF. In terms of dominance relation it can be formulated + // as "a referent has another RF which is dominated by the redundant RF". + // + // NB! It is safe to assume that dominating RF is redundant only during expansion. + // Otherwise, there's a chance for the superseding RF node to go away before expansion. + // Non-RF users are ignored for a similar reason: they can go away before or after expansion + // takes place, so no guarantees reachability information is preserved. + } else { + assert(sfpt_worklist.size() == 0, "not empty"); + enumerate_interfering_sfpts(rf, this, stack, visited, sfpt_worklist); + + Node* referent = rf->referent(); + while (sfpt_worklist.size() > 0) { + SafePointNode* sfpt = sfpt_worklist.pop()->as_SafePoint(); + assert(is_dominator(get_ctrl(referent), sfpt), ""); + assert(sfpt->req() == rf_base_offset(sfpt), "no extra edges allowed"); + if (sfpt->find_edge(referent) == -1) { + ReachabilityEdge p(sfpt, referent); + reachability_edges.push(p); + } + } + } + for_removal.push(rf); + } + } + // Materialize reachability edges. 
+ while (reachability_edges.length() > 0) { + ReachabilityEdge p = reachability_edges.pop(); + SafePointNode* sfpt = p.first; + Node* referent = p.second; + if (sfpt->find_edge(referent) == -1) { + sfpt->add_req(referent); + igvn()._worklist.push(sfpt); + } + } + // Eliminate processed RFs. They become redundant once reachability edges are added. + bool progress = (for_removal.size() > 0); + while (for_removal.size() > 0) { + remove_rf(for_removal.pop()->as_ReachabilityFence()); + } + + assert(C->reachability_fences_count() == no_of_constant_rfs, ""); + return progress; +} + +//====================================================================== +//---------------------------- Phase 3 --------------------------------- + +// Find a point in CFG right after safepoint node to insert reachability fence. +static Node* sfpt_ctrl_out(SafePointNode* sfpt) { + if (sfpt->is_Call()) { + CallProjections callprojs; + sfpt->as_Call()->extract_projections(&callprojs, + false /*separate_io_proj*/, + false /*do_asserts*/, + true /*allow_handlers*/); + // Calls can have multiple control projections. However, reachability edge expansion + // happens during final graph reshaping which is performed very late in compilation pipeline. + // The assumption here is that the control path chosen for insertion can't go away. + // So, materializing a reachability fence on a single control path produced by a call + // is enough to keep the referent oop alive across the call. + if (callprojs.fallthrough_catchproj != nullptr) { + return callprojs.fallthrough_catchproj; + } else if (callprojs.catchall_catchproj != nullptr) { + return callprojs.catchall_catchproj; // rethrow stub + } else if (callprojs.fallthrough_proj != nullptr) { + return callprojs.fallthrough_proj; // no exceptions thrown + } else { + ShouldNotReachHere(); + } + } else { + return sfpt; + } +} + +// Phase 3: materialize reachability fences from reachability edges on safepoints. 
+// Turn extra safepoint edges into reachability fences immediately following the safepoint. +// +// NB! As of now, a special care is needed to properly enumerate reachability edges because +// there are other use cases for non-debug safepoint edges. expand_reachability_edges() runs +// after macro expansion where runtime calls during array allocation are annotated with +// valid_length_test_input as an extra edges. Until there's a mechanism to distinguish between +// different types of non-debug edges, unrelated cases are filtered out explicitly and in ad-hoc manner. +void Compile::expand_reachability_edges(Unique_Node_List& safepoints) { + for (uint i = 0; i < safepoints.size(); i++) { + SafePointNode* sfpt = safepoints.at(i)->as_SafePoint(); + + uint rf_offset = rf_base_offset(sfpt); + if (sfpt->jvms() != nullptr && sfpt->req() > rf_offset) { + assert(is_interfering_sfpt_candidate(sfpt), ""); + Node* ctrl_out = sfpt_ctrl_out(sfpt); + Node* ctrl_end = ctrl_out->unique_ctrl_out(); + + Node* extra_edge = nullptr; + if (sfpt->is_Call()) { + address entry = sfpt->as_Call()->entry_point(); + if (entry == OptoRuntime::new_array_Java() || + entry == OptoRuntime::new_array_nozero_Java()) { + // valid_length_test_input is appended during macro expansion at the very end + int last_idx = sfpt->req() - 1; + extra_edge = sfpt->in(last_idx); + sfpt->del_req(last_idx); + } + } + + while (sfpt->req() > rf_offset) { + int idx = sfpt->req() - 1; + Node* referent = sfpt->in(idx); + sfpt->del_req(idx); + + Node* new_rf = new ReachabilityFenceNode(C, ctrl_out, referent); + ctrl_end->replace_edge(ctrl_out, new_rf); + ctrl_end = new_rf; + } + + if (extra_edge != nullptr) { + sfpt->add_req(extra_edge); // Add valid_length_test_input edge back + } + } + } +} diff --git a/src/hotspot/share/opto/reachability.hpp b/src/hotspot/share/opto/reachability.hpp new file mode 100644 index 00000000000..ba435c8484f --- /dev/null +++ b/src/hotspot/share/opto/reachability.hpp @@ -0,0 +1,83 @@ +/* + * 
Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_OPTO_REACHABILITY_HPP +#define SHARE_OPTO_REACHABILITY_HPP + +#include "opto/multnode.hpp" +#include "opto/node.hpp" +#include "opto/opcodes.hpp" +#include "opto/type.hpp" + +//------------------------ReachabilityFenceNode-------------------------- +// Represents a Reachability Fence (RF) in the code. +// +// RF ensures that the given object (referent) remains strongly reachable regardless of +// any optimizing transformations the virtual machine may perform that might otherwise +// allow the object to become unreachable. + +// java.lang.ref.Reference::reachabilityFence calls are intrinsified into ReachabilityFence nodes. +// +// More details in reachability.cpp. 
+class ReachabilityFenceNode : public Node { +public: + ReachabilityFenceNode(Compile* C, Node* ctrl, Node* referent) + : Node(1) { + assert(referent->bottom_type()->isa_oopptr() || + referent->bottom_type()->isa_narrowoop() != nullptr || + referent->bottom_type() == TypePtr::NULL_PTR, + "%s", Type::str(referent->bottom_type())); + init_class_id(Class_ReachabilityFence); + init_req(TypeFunc::Control, ctrl); + add_req(referent); + C->add_reachability_fence(this); + } + virtual int Opcode() const; + virtual bool is_CFG() const { return true; } + virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash + virtual bool depends_only_on_test() const { return false; }; + virtual uint ideal_reg() const { return 0; } // not matched in the AD file + virtual const Type* bottom_type() const { return Type::CONTROL; } + virtual const RegMask& in_RegMask(uint idx) const { + // Fake input register mask for the referent: accepts all registers and all stack slots. + // This avoids redundant register moves around reachability fences. 
+ return RegMask::ALL; + } + virtual const RegMask& out_RegMask() const { + return RegMask::EMPTY; + } + + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); + virtual Node* Identity(PhaseGVN* phase); + + Node* referent() const { return in(1); } + bool is_redundant(PhaseGVN& gvn); + bool clear_referent(PhaseIterGVN& phase); + +#ifndef PRODUCT + virtual void format(PhaseRegAlloc* ra, outputStream* st) const; + virtual void emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra) const; +#endif +}; + +#endif // SHARE_OPTO_REACHABILITY_HPP diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 0d2dbb813bd..c01e8578e43 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -24,6 +24,7 @@ #include "classfile/vmClasses.hpp" #include "classfile/vmSymbols.hpp" +#include "code/aotCodeCache.hpp" #include "code/codeCache.hpp" #include "code/compiledIC.hpp" #include "code/nmethod.hpp" @@ -154,7 +155,8 @@ static bool check_compiled_frame(JavaThread* thread) { bool OptoRuntime::generate(ciEnv* env) { C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB) - + // disallow any further c2 stub generation + AOTCodeCache::set_c2_stubs_complete(); return true; } diff --git a/src/hotspot/share/opto/split_if.cpp b/src/hotspot/share/opto/split_if.cpp index dff8bf86606..e5f8043ae19 100644 --- a/src/hotspot/share/opto/split_if.cpp +++ b/src/hotspot/share/opto/split_if.cpp @@ -38,6 +38,11 @@ RegionNode* PhaseIdealLoop::split_thru_region(Node* n, RegionNode* region) { assert(n->is_CFG(), ""); RegionNode* r = new RegionNode(region->req()); IdealLoopTree* loop = get_loop(n); +#ifndef PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Splitting %d %s through %d %s", n->_idx, n->Name(), region->_idx, region->Name()); + } +#endif for (uint i = 1; i < region->req(); i++) { Node* x = n->clone(); Node* in0 = n->in(0); @@ -80,7 +85,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) { if( split_up( n->in(i), blk1, blk2 ) ) { // Got 
split recursively and self went dead? if (n->outcnt() == 0) - _igvn.remove_dead_node(n); + _igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph); return true; } } @@ -145,6 +150,11 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) { } // Now actually split-up this guy. One copy per control path merging. +#ifndef PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Splitting up: %d %s", n->_idx, n->Name()); + } +#endif Node *phi = PhiNode::make_blank(blk1, n); for( uint j = 1; j < blk1->req(); j++ ) { Node *x = n->clone(); @@ -185,6 +195,11 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) { // AddP and CheckCastPP have the same obj input after split if. bool PhaseIdealLoop::clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2) { if (n->Opcode() == Op_AddP && at_relevant_ctrl(n, blk1, blk2)) { +#ifndef PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Cloning down (LoadKlass): %d %s", n->_idx, n->Name()); + } +#endif Node_List cmp_nodes; uint old = C->unique(); for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { @@ -258,7 +273,7 @@ void PhaseIdealLoop::clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp _igvn.replace_input_of(decode_clone, 1, loadklass_clone); _igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone); if (decode->outcnt() == 0) { - _igvn.remove_dead_node(decode); + _igvn.remove_dead_node(decode, PhaseIterGVN::NodeOrigin::Graph); } } } @@ -275,7 +290,7 @@ void PhaseIdealLoop::clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp _igvn.replace_input_of(cmp, i, loadklass_clone); _igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone); if (loadklass->outcnt() == 0) { - _igvn.remove_dead_node(loadklass); + _igvn.remove_dead_node(loadklass, PhaseIterGVN::NodeOrigin::Graph); } } } @@ -301,6 +316,11 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2) at_relevant_ctrl(cmov, blk1, blk2)))) { // Must clone down +#ifndef 
PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Cloning down (Cmp): %d %s", n->_idx, n->Name()); + } +#endif if (!n->is_FastLock()) { // Clone down any block-local BoolNode uses of this CmpNode for (DUIterator i = n->outs(); n->has_out(i); i++) { @@ -349,7 +369,7 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2) _igvn.replace_input_of(x2, 1, x1); _igvn.replace_input_of(iff, 1, x2); } - _igvn.remove_dead_node(u); + _igvn.remove_dead_node(u, PhaseIterGVN::NodeOrigin::Graph); --j; } else { // We might see an Opaque1 from a loop limit check here @@ -365,7 +385,7 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2) --j; } } - _igvn.remove_dead_node(bol); + _igvn.remove_dead_node(bol, PhaseIterGVN::NodeOrigin::Graph); --i; } } @@ -383,7 +403,7 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2) register_new_node(x, ctrl_or_self(use)); _igvn.replace_input_of(use, pos, x); } - _igvn.remove_dead_node(n); + _igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph); return true; } @@ -401,6 +421,12 @@ void PhaseIdealLoop::clone_template_assertion_expression_down(Node* node) { return; } +#ifndef PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Cloning down (Template Assertion Expression): %d %s", node->_idx, node->Name()); + } +#endif + TemplateAssertionExpressionNode template_assertion_expression_node(node); auto clone_expression = [&](IfNode* template_assertion_predicate) { OpaqueTemplateAssertionPredicateNode* opaque_node = @@ -459,6 +485,11 @@ Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, No Node *phi_post; if( prior_n == new_false || prior_n == new_true ) { phi_post = def->clone(); +#ifndef PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Spinup: cloning def to sink: %d %s -> %d %s", def->_idx, def->Name(), phi_post->_idx, phi_post->Name()); + } +#endif phi_post->set_req(0, prior_n ); register_new_node(phi_post, prior_n); } else { @@ -472,6 
+503,11 @@ Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, No } else { assert( def->is_Phi(), "" ); assert( prior_n->is_Region(), "must be a post-dominating merge point" ); +#ifndef PRODUCT + if (TraceSplitIf) { + tty->print_cr(" Spinup: creating new Phi for merge: %d %s", def->_idx, def->Name()); + } +#endif // Need a Phi here phi_post = PhiNode::make_blank(prior_n, def); @@ -481,7 +517,7 @@ Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, No Node *t = _igvn.hash_find_insert(phi_post); if( t ) { // See if we already have this one // phi_post will not be used, so kill it - _igvn.remove_dead_node(phi_post); + _igvn.remove_dead_node(phi_post, PhaseIterGVN::NodeOrigin::Speculative); phi_post->destruct(&_igvn); phi_post = t; } else { @@ -611,7 +647,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio Node* m = n->out(j); // If m is dead, throw it away, and declare progress if (_loop_or_ctrl[m->_idx] == nullptr) { - _igvn.remove_dead_node(m); + _igvn.remove_dead_node(m, PhaseIterGVN::NodeOrigin::Graph); // fall through } else if (m != iff && split_up(m, region, iff)) { @@ -668,7 +704,10 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio new_true = ifpx; } } - _igvn.remove_dead_node(new_iff); + assert(new_false != nullptr, "iff is malformed"); + assert(new_true != nullptr, "iff is malformed"); + + _igvn.remove_dead_node(new_iff, PhaseIterGVN::NodeOrigin::Speculative); // Lazy replace IDOM info with the region's dominator replace_node_and_forward_ctrl(iff, region_dom); // Break the self-cycle. Required for forward_ctrl to work on region. @@ -684,7 +723,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio for (DUIterator k = region->outs(); region->has_out(k); k++) { Node* phi = region->out(k); if (!phi->in(0)) { // Dead phi? 
Remove it - _igvn.remove_dead_node(phi); + _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Graph); } else if (phi == region) { // Found the self-reference continue; // No roll-back of DUIterator } else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region @@ -703,7 +742,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true); } // End of while phi has uses // Remove the dead Phi - _igvn.remove_dead_node( phi ); + _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Graph); } else { assert(phi->in(0) == region, "Inconsistent graph"); // Random memory op guarded by Region. Compute new DEF for USE. @@ -716,7 +755,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio --k; } // End of while merge point has phis - _igvn.remove_dead_node(region); + _igvn.remove_dead_node(region, PhaseIterGVN::NodeOrigin::Graph); // Control is updated here to a region, which is not a test, so any node that // depends_only_on_test must be pinned diff --git a/src/hotspot/share/opto/subtypenode.cpp b/src/hotspot/share/opto/subtypenode.cpp index 8e4c7d829a7..317b839fbd4 100644 --- a/src/hotspot/share/opto/subtypenode.cpp +++ b/src/hotspot/share/opto/subtypenode.cpp @@ -1,5 +1,6 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -177,12 +178,12 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) { return true; } const Type* cached_t = Value(phase); // cache the type to validate consistency - switch (C->static_subtype_check(superk, subk)) { + switch (C->static_subtype_check(superk, subk, false)) { case Compile::SSC_easy_test: { return verify_helper(phase, load_klass(phase), cached_t); } case Compile::SSC_full_test: { - Node* p1 = phase->transform(new AddPNode(C->top(), superklass, phase->MakeConX(in_bytes(Klass::super_check_offset_offset())))); + Node* p1 = phase->transform(AddPNode::make_off_heap(superklass, phase->MakeConX(in_bytes(Klass::super_check_offset_offset())))); Node* chk_off = phase->transform(new LoadINode(nullptr, C->immutable_memory(), p1, phase->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); record_for_cleanup(chk_off, phase); @@ -194,7 +195,7 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) { #ifdef _LP64 chk_off_X = phase->transform(new ConvI2LNode(chk_off_X)); #endif - Node* p2 = phase->transform(new AddPNode(C->top(), subklass, chk_off_X)); + Node* p2 = phase->transform(AddPNode::make_off_heap(subklass, chk_off_X)); Node* nkls = phase->transform(LoadKlassNode::make(*phase, C->immutable_memory(), p2, phase->type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL)); return verify_helper(phase, nkls, cached_t); @@ -217,7 +218,7 @@ Node* SubTypeCheckNode::load_klass(PhaseGVN* phase) const { const Type* sub_t = phase->type(obj_or_subklass); Node* subklass = nullptr; if (sub_t->isa_oopptr()) { - Node* adr = phase->transform(new AddPNode(obj_or_subklass, obj_or_subklass, phase->MakeConX(oopDesc::klass_offset_in_bytes()))); + Node* adr = phase->transform(AddPNode::make_with_base(obj_or_subklass, phase->MakeConX(oopDesc::klass_offset_in_bytes()))); subklass = phase->transform(LoadKlassNode::make(*phase, phase->C->immutable_memory(), adr, TypeInstPtr::KLASS)); record_for_cleanup(subklass, phase); } 
else { diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp index d878b2b1d3d..53845a94c1c 100644 --- a/src/hotspot/share/opto/superword.cpp +++ b/src/hotspot/share/opto/superword.cpp @@ -2500,7 +2500,9 @@ static bool can_subword_truncate(Node* in, const Type* type) { switch (opc) { case Op_AbsI: case Op_DivI: + case Op_UDivI: case Op_ModI: + case Op_UModI: case Op_MinI: case Op_MaxI: case Op_CMoveI: diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index 1a0872ee0e6..308ec819773 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -49,6 +49,9 @@ #include "utilities/ostream.hpp" #include "utilities/powerOfTwo.hpp" #include "utilities/stringUtils.hpp" +#if INCLUDE_SHENANDOAHGC +#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" +#endif // INCLUDE_SHENANDOAHGC // Portions of code courtesy of Clifford Click @@ -732,6 +735,11 @@ void Type::Initialize_shared(Compile* current) { mreg2type[Op_VecY] = TypeVect::VECTY; mreg2type[Op_VecZ] = TypeVect::VECTZ; +#if INCLUDE_SHENANDOAHGC + ShenandoahBarrierSetC2::init(); +#endif //INCLUDE_SHENANDOAHGC + + BarrierSetC2::make_clone_type(); LockNode::initialize_lock_Type(); ArrayCopyNode::initialize_arraycopy_Type(); OptoRuntime::initialize_types(); @@ -3481,7 +3489,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const TypeInterfaces* inter #ifdef _LP64 if (_offset > 0 || _offset == Type::OffsetTop || _offset == Type::OffsetBot) { if (_offset == oopDesc::klass_offset_in_bytes()) { - _is_ptr_to_narrowklass = UseCompressedClassPointers; + _is_ptr_to_narrowklass = true; } else if (klass() == nullptr) { // Array with unknown body type assert(this->isa_aryptr(), "only arrays without klass"); diff --git a/src/hotspot/share/opto/vector.cpp b/src/hotspot/share/opto/vector.cpp index f44df7e6da2..d35717c5922 100644 --- a/src/hotspot/share/opto/vector.cpp +++ b/src/hotspot/share/opto/vector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 
2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -455,14 +455,12 @@ void PhaseVector::expand_vunbox_node(VectorUnboxNode* vec_unbox) { gvn.record_for_igvn(local_mem); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); C2OptAccess access(gvn, ctrl, local_mem, decorators, T_OBJECT, obj, addr); - const Type* type = TypeOopPtr::make_from_klass(field->type()->as_klass()); - vec_field_ld = bs->load_at(access, type); - } - // For proper aliasing, attach concrete payload type. - ciKlass* payload_klass = ciTypeArrayKlass::make(bt); - const Type* payload_type = TypeAryPtr::make_from_klass(payload_klass)->cast_to_ptr_type(TypePtr::NotNull); - vec_field_ld = gvn.transform(new CastPPNode(nullptr, vec_field_ld, payload_type)); + // For proper aliasing, attach concrete payload type. + ciKlass* payload_klass = ciTypeArrayKlass::make(bt); + const Type* payload_type = TypeAryPtr::make_from_klass(payload_klass)->cast_to_ptr_type(TypePtr::NotNull); + vec_field_ld = bs->load_at(access, payload_type); + } Node* adr = kit.array_element_address(vec_field_ld, gvn.intcon(0), bt); const TypePtr* adr_type = adr->bottom_type()->is_ptr(); diff --git a/src/hotspot/share/opto/vectornode.cpp b/src/hotspot/share/opto/vectornode.cpp index d332bf440f6..d19aa476196 100644 --- a/src/hotspot/share/opto/vectornode.cpp +++ b/src/hotspot/share/opto/vectornode.cpp @@ -1047,6 +1047,20 @@ Node* VectorNode::Ideal(PhaseGVN* phase, bool can_reshape) { return nullptr; } +// Traverses a chain of VectorMaskCast and returns the first non VectorMaskCast node. +// +// Due to the unique nature of vector masks, for specific IR patterns, +// VectorMaskCast does not affect the output results. 
For example: +// (VectorStoreMask (VectorMaskCast* (VectorLoadMask x))) => (x) +// x remains to be a bool vector with no changes. +// This function can be used to eliminate the VectorMaskCast in such patterns. +Node* VectorNode::uncast_mask(Node* n) { + while (n->Opcode() == Op_VectorMaskCast) { + n = n->in(1); + } + return n; +} + // Return initial Pack node. Additional operands added with add_opd() calls. PackNode* PackNode::make(Node* s, uint vlen, BasicType bt) { const TypeVect* vt = TypeVect::make(bt, vlen); @@ -1246,6 +1260,10 @@ int ReductionNode::opcode(int opc, BasicType bt) { assert(bt == T_LONG, "must be"); vopc = Op_AddReductionVL; break; + case Op_AddHF: + assert(bt == T_SHORT, "must be"); + vopc = Op_AddReductionVHF; + break; case Op_AddF: assert(bt == T_FLOAT, "must be"); vopc = Op_AddReductionVF; @@ -1270,6 +1288,10 @@ int ReductionNode::opcode(int opc, BasicType bt) { assert(bt == T_LONG, "must be"); vopc = Op_MulReductionVL; break; + case Op_MulHF: + assert(bt == T_SHORT, "must be"); + vopc = Op_MulReductionVHF; + break; case Op_MulF: assert(bt == T_FLOAT, "must be"); vopc = Op_MulReductionVF; @@ -1418,10 +1440,12 @@ ReductionNode* ReductionNode::make(int opc, Node* ctrl, Node* n1, Node* n2, Basi switch (vopc) { case Op_AddReductionVI: return new AddReductionVINode(ctrl, n1, n2); case Op_AddReductionVL: return new AddReductionVLNode(ctrl, n1, n2); + case Op_AddReductionVHF: return new AddReductionVHFNode(ctrl, n1, n2, requires_strict_order); case Op_AddReductionVF: return new AddReductionVFNode(ctrl, n1, n2, requires_strict_order); case Op_AddReductionVD: return new AddReductionVDNode(ctrl, n1, n2, requires_strict_order); case Op_MulReductionVI: return new MulReductionVINode(ctrl, n1, n2); case Op_MulReductionVL: return new MulReductionVLNode(ctrl, n1, n2); + case Op_MulReductionVHF: return new MulReductionVHFNode(ctrl, n1, n2, requires_strict_order); case Op_MulReductionVF: return new MulReductionVFNode(ctrl, n1, n2, requires_strict_order); case 
Op_MulReductionVD: return new MulReductionVDNode(ctrl, n1, n2, requires_strict_order); case Op_MinReductionV: return new MinReductionVNode (ctrl, n1, n2); @@ -1495,10 +1519,12 @@ Node* VectorLoadMaskNode::Identity(PhaseGVN* phase) { Node* VectorStoreMaskNode::Identity(PhaseGVN* phase) { // Identity transformation on boolean vectors. - // VectorStoreMask (VectorLoadMask bv) elem_size ==> bv + // VectorStoreMask (VectorMaskCast* VectorLoadMask bv) elem_size ==> bv // vector[n]{bool} => vector[n]{t} => vector[n]{bool} - if (in(1)->Opcode() == Op_VectorLoadMask) { - return in(1)->in(1); + Node* in1 = VectorNode::uncast_mask(in(1)); + if (in1->Opcode() == Op_VectorLoadMask) { + assert(length() == in1->as_Vector()->length(), "vector length must match"); + return in1->in(1); } return this; } @@ -1597,6 +1623,8 @@ Node* ReductionNode::make_identity_con_scalar(PhaseGVN& gvn, int sopc, BasicType return nullptr; } break; + case Op_AddReductionVHF: + return gvn.makecon(TypeH::ZERO); case Op_AddReductionVI: // fallthrough case Op_AddReductionVL: // fallthrough case Op_AddReductionVF: // fallthrough @@ -1608,6 +1636,8 @@ Node* ReductionNode::make_identity_con_scalar(PhaseGVN& gvn, int sopc, BasicType return gvn.makecon(TypeInt::ONE); case Op_MulReductionVL: return gvn.makecon(TypeLong::ONE); + case Op_MulReductionVHF: + return gvn.makecon(TypeH::ONE); case Op_MulReductionVF: return gvn.makecon(TypeF::ONE); case Op_MulReductionVD: @@ -1700,12 +1730,14 @@ bool ReductionNode::auto_vectorization_requires_strict_order(int vopc) { // These are cases that all have associative operations, which can // thus be reordered, allowing non-strict order reductions. 
return false; + case Op_AddReductionVHF: + case Op_MulReductionVHF: case Op_AddReductionVF: case Op_MulReductionVF: case Op_AddReductionVD: case Op_MulReductionVD: // Floating-point addition and multiplication are non-associative, - // so AddReductionVF/D and MulReductionVF/D require strict ordering + // so AddReductionVHF/VF/VD and MulReductionVHF/VF/VD require strict ordering // in auto-vectorization. return true; default: @@ -1959,11 +1991,12 @@ Node* VectorMaskOpNode::Ideal(PhaseGVN* phase, bool can_reshape) { } Node* VectorMaskCastNode::Identity(PhaseGVN* phase) { - Node* in1 = in(1); - // VectorMaskCast (VectorMaskCast x) => x - if (in1->Opcode() == Op_VectorMaskCast && - vect_type()->eq(in1->in(1)->bottom_type())) { - return in1->in(1); + // (VectorMaskCast+ x) => (x) + // If the types of the input and output nodes in a VectorMaskCast chain are + // exactly the same, the intermediate VectorMaskCast nodes can be eliminated. + Node* n = VectorNode::uncast_mask(this); + if (vect_type()->eq(n->bottom_type())) { + return n; } return this; } @@ -2020,26 +2053,21 @@ Node* VectorLongToMaskNode::Ideal(PhaseGVN* phase, bool can_reshape) { uint vlen = dst_type->length(); const TypeVectMask* is_mask = dst_type->isa_vectmask(); + // Pattern: (VectorLongToMask (AndL (VectorMaskToLong src) mask)) + // Replace with: (VectorMaskCast src) + // The cast is needed if there are different mask types, and can be folded otherwise. + // The mask has exactly the vlen first bits on: mask = (2 << vlen - 1) if (in(1)->Opcode() == Op_AndL && in(1)->in(1)->Opcode() == Op_VectorMaskToLong && in(1)->in(2)->bottom_type()->isa_long() && in(1)->in(2)->bottom_type()->is_long()->is_con() && - in(1)->in(2)->bottom_type()->is_long()->get_con() == ((1L << vlen) - 1)) { - // Different src/dst mask length represents a re-interpretation operation, - // we can however generate a mask casting operation if length matches. 
- Node* src = in(1)->in(1)->in(1); - if (is_mask == nullptr) { - if (src->Opcode() != Op_VectorStoreMask) { - return nullptr; - } - src = src->in(1); - } - const TypeVect* src_type = src->bottom_type()->is_vect(); - if (src_type->length() == vlen && - ((src_type->isa_vectmask() == nullptr && is_mask == nullptr) || - (src_type->isa_vectmask() && is_mask))) { - return new VectorMaskCastNode(src, dst_type); - } + in(1)->in(2)->bottom_type()->is_long()->get_con() == ((1LL << vlen) - 1)) { + Node* src = in(1)->in(1)->in(1); + const TypeVect* src_type = src->bottom_type()->is_vect(); + if (src_type->length() == vlen && + ((src_type->isa_vectmask() == nullptr) == (is_mask == nullptr))) { + return new VectorMaskCastNode(src, dst_type); + } } // VectorLongToMask(-1/0) => MaskAll(-1/0) @@ -2437,67 +2465,68 @@ bool MulVLNode::has_uint_inputs() const { has_vector_elements_fit_uint(in(2)); } -static Node* UMinMaxV_Ideal(Node* n, PhaseGVN* phase, bool can_reshape) { +static Node* MinMaxV_Common_Ideal(MinMaxVNode* n, PhaseGVN* phase, bool can_reshape) { int vopc = n->Opcode(); - assert(vopc == Op_UMinV || vopc == Op_UMaxV, "Unexpected opcode"); + int min_opcode = n->min_opcode(); + int max_opcode = n->max_opcode(); - Node* umin = nullptr; - Node* umax = nullptr; + Node* min_op = nullptr; + Node* max_op = nullptr; int lopc = n->in(1)->Opcode(); int ropc = n->in(2)->Opcode(); - if (lopc == Op_UMinV && ropc == Op_UMaxV) { - umin = n->in(1); - umax = n->in(2); - } else if (lopc == Op_UMaxV && ropc == Op_UMinV) { - umin = n->in(2); - umax = n->in(1); + if (lopc == min_opcode && ropc == max_opcode) { + min_op = n->in(1); + max_op = n->in(2); + } else if (lopc == max_opcode && ropc == min_opcode) { + min_op = n->in(2); + max_op = n->in(1); } else { return nullptr; } - // UMin (UMin(a, b), UMax(a, b)) => UMin(a, b) - // UMin (UMax(a, b), UMin(b, a)) => UMin(a, b) - // UMax (UMin(a, b), UMax(a, b)) => UMax(a, b) - // UMax (UMax(a, b), UMin(b, a)) => UMax(a, b) - if (umin != nullptr && 
umax != nullptr) { - if ((umin->in(1) == umax->in(1) && umin->in(2) == umax->in(2)) || - (umin->in(2) == umax->in(1) && umin->in(1) == umax->in(2))) { - if (vopc == Op_UMinV) { - return new UMinVNode(umax->in(1), umax->in(2), n->bottom_type()->is_vect()); - } else { - return new UMaxVNode(umax->in(1), umax->in(2), n->bottom_type()->is_vect()); + // Min (Min(a, b), Max(a, b)) => Min(a, b) + // Min (Max(a, b), Min(b, a)) => Min(a, b) + // Max (Min(a, b), Max(a, b)) => Max(a, b) + // Max (Max(a, b), Min(b, a)) => Max(a, b) + + if (min_op != nullptr && max_op != nullptr) { + // Skip if predication status is inconsistent across n, min_op, and max_op, + // or if predicated operands carry different masks. + if (n->is_predicated_vector() != min_op->is_predicated_vector() || + min_op->is_predicated_vector() != max_op->is_predicated_vector()) { + return nullptr; + } + if (min_op->is_predicated_vector() && + !(n->in(3) == min_op->in(3) && min_op->in(3) == max_op->in(3))) { + return nullptr; + } + + if ((min_op->in(1) == max_op->in(1) && min_op->in(2) == max_op->in(2)) || + (min_op->in(2) == max_op->in(1) && min_op->in(1) == max_op->in(2))) { + // Use n->in(1) inputs for the result to preserve correct merge-masking + // passthrough: inactive lanes use in(1), so result->in(1) must equal + // n->in(1)->in(1) to maintain the original passthrough semantics. 
+ VectorNode* result = VectorNode::make(vopc, n->in(1)->in(1), n->in(1)->in(2), n->bottom_type()->is_vect()); + if (n->is_predicated_vector()) { + result->add_req(n->in(3)); + result->add_flag(Node::Flag_is_predicated_vector); } + return result; } } return nullptr; } -Node* UMinVNode::Ideal(PhaseGVN* phase, bool can_reshape) { - Node* progress = UMinMaxV_Ideal(this, phase, can_reshape); +Node* MinMaxVNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* progress = MinMaxV_Common_Ideal(this, phase, can_reshape); if (progress != nullptr) return progress; return VectorNode::Ideal(phase, can_reshape); } -Node* UMinVNode::Identity(PhaseGVN* phase) { - // UMin (a, a) => a - if (in(1) == in(2)) { - return in(1); - } - return this; -} - -Node* UMaxVNode::Ideal(PhaseGVN* phase, bool can_reshape) { - Node* progress = UMinMaxV_Ideal(this, phase, can_reshape); - if (progress != nullptr) return progress; - - return VectorNode::Ideal(phase, can_reshape); -} - -Node* UMaxVNode::Identity(PhaseGVN* phase) { - // UMax (a, a) => a +Node* MinMaxVNode::Identity(PhaseGVN* phase) { if (in(1) == in(2)) { return in(1); } @@ -2507,4 +2536,5 @@ Node* UMaxVNode::Identity(PhaseGVN* phase) { void VectorBoxAllocateNode::dump_spec(outputStream *st) const { CallStaticJavaNode::dump_spec(st); } + #endif // !PRODUCT diff --git a/src/hotspot/share/opto/vectornode.hpp b/src/hotspot/share/opto/vectornode.hpp index f0d010ee735..91cff9fcae8 100644 --- a/src/hotspot/share/opto/vectornode.hpp +++ b/src/hotspot/share/opto/vectornode.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2007, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright 2026 Arm Limited and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -195,6 +196,7 @@ class VectorNode : public TypeNode { static bool is_scalar_op_that_returns_int_but_vector_op_returns_long(int opc); static bool is_reinterpret_opcode(int opc); + static Node* uncast_mask(Node* n); static void trace_new_vector(Node* n, const char* context) { #ifdef ASSERT @@ -321,7 +323,7 @@ class ReductionNode : public Node { virtual uint size_of() const { return sizeof(*this); } // Floating-point addition and multiplication are non-associative, so - // AddReductionVF/D and MulReductionVF/D require strict ordering + // AddReductionVHF/F/D and MulReductionVHF/F/D require strict ordering // in auto-vectorization. Vector API can generate AddReductionVF/D // and MulReductionVF/VD without strict ordering, which can benefit // some platforms. @@ -358,6 +360,35 @@ public: virtual int Opcode() const; }; +// Vector add half float as a reduction +class AddReductionVHFNode : public ReductionNode { +private: + // True if add reduction operation for half floats requires strict ordering. + // As an example - The value is true when add reduction for half floats is auto-vectorized + // as auto-vectorization mandates strict ordering but the value is false when this node + // is generated through VectorAPI as VectorAPI does not impose any such rules on ordering. 
+ const bool _requires_strict_order; + +public: + // _requires_strict_order is set to true by default as mandated by auto-vectorization + AddReductionVHFNode(Node* ctrl, Node* in1, Node* in2, bool requires_strict_order = true) : + ReductionNode(ctrl, in1, in2), _requires_strict_order(requires_strict_order) {} + + int Opcode() const override; + bool requires_strict_order() const override { return _requires_strict_order; } + + uint hash() const override { return Node::hash() + _requires_strict_order; } + + bool cmp(const Node& n) const override { + return Node::cmp(n) && _requires_strict_order == ((ReductionNode&)n).requires_strict_order(); + } + + uint size_of() const override { return sizeof(*this); } + + const Type* bottom_type() const override { return Type::HALF_FLOAT; } + uint ideal_reg() const override { return Op_RegF; } +}; + // Vector add float as a reduction class AddReductionVFNode : public ReductionNode { private: @@ -367,7 +398,7 @@ private: // is generated through VectorAPI as VectorAPI does not impose any such rules on ordering. const bool _requires_strict_order; public: - //_requires_strict_order is set to true by default as mandated by auto-vectorization + // _requires_strict_order is set to true by default as mandated by auto-vectorization AddReductionVFNode(Node* ctrl, Node* in1, Node* in2, bool requires_strict_order = true) : ReductionNode(ctrl, in1, in2), _requires_strict_order(requires_strict_order) {} @@ -393,7 +424,7 @@ private: // is generated through VectorAPI as VectorAPI does not impose any such rules on ordering. 
const bool _requires_strict_order; public: - //_requires_strict_order is set to true by default as mandated by auto-vectorization + // _requires_strict_order is set to true by default as mandated by auto-vectorization AddReductionVDNode(Node* ctrl, Node* in1, Node* in2, bool requires_strict_order = true) : ReductionNode(ctrl, in1, in2), _requires_strict_order(requires_strict_order) {} @@ -577,6 +608,35 @@ public: virtual int Opcode() const; }; +// Vector multiply half float as a reduction +class MulReductionVHFNode : public ReductionNode { +private: + // True if mul reduction operation for half floats requires strict ordering. + // As an example - The value is true when mul reduction for half floats is auto-vectorized + // as auto-vectorization mandates strict ordering but the value is false when this node + // is generated through VectorAPI as VectorAPI does not impose any such rules on ordering. + const bool _requires_strict_order; + +public: + // _requires_strict_order is set to true by default as mandated by auto-vectorization + MulReductionVHFNode(Node* ctrl, Node* in1, Node* in2, bool requires_strict_order = true) : + ReductionNode(ctrl, in1, in2), _requires_strict_order(requires_strict_order) {} + + int Opcode() const override; + bool requires_strict_order() const override { return _requires_strict_order; } + + uint hash() const override { return Node::hash() + _requires_strict_order; } + + bool cmp(const Node& n) const override { + return Node::cmp(n) && _requires_strict_order == ((ReductionNode&)n).requires_strict_order(); + } + + uint size_of() const override { return sizeof(*this); } + + const Type* bottom_type() const override { return Type::HALF_FLOAT; } + uint ideal_reg() const override { return Op_RegF; } +}; + // Vector multiply float as a reduction class MulReductionVFNode : public ReductionNode { // True if mul reduction operation for floats requires strict ordering. 
@@ -585,7 +645,7 @@ class MulReductionVFNode : public ReductionNode { // is generated through VectorAPI as VectorAPI does not impose any such rules on ordering. const bool _requires_strict_order; public: - //_requires_strict_order is set to true by default as mandated by auto-vectorization + // _requires_strict_order is set to true by default as mandated by auto-vectorization MulReductionVFNode(Node* ctrl, Node* in1, Node* in2, bool requires_strict_order = true) : ReductionNode(ctrl, in1, in2), _requires_strict_order(requires_strict_order) {} @@ -610,7 +670,7 @@ class MulReductionVDNode : public ReductionNode { // is generated through VectorAPI as VectorAPI does not impose any such rules on ordering. const bool _requires_strict_order; public: - //_requires_strict_order is set to true by default as mandated by auto-vectorization + // _requires_strict_order is set to true by default as mandated by auto-vectorization MulReductionVDNode(Node* ctrl, Node* in1, Node* in2, bool requires_strict_order = true) : ReductionNode(ctrl, in1, in2), _requires_strict_order(requires_strict_order) {} @@ -662,10 +722,22 @@ public: virtual int Opcode() const; }; -// Vector Min -class MinVNode : public VectorNode { +// Common superclass for Min/Max vector nodes +class MinMaxVNode : public VectorNode { public: - MinVNode(Node* in1, Node* in2, const TypeVect* vt) : VectorNode(in1, in2, vt) {} + MinMaxVNode(Node* in1, Node* in2, const TypeVect* vt) : VectorNode(in1, in2, vt) {} + virtual int min_opcode() const = 0; + virtual int max_opcode() const = 0; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); + virtual Node* Identity(PhaseGVN* phase); +}; + +// Vector Min +class MinVNode : public MinMaxVNode { +public: + MinVNode(Node* in1, Node* in2, const TypeVect* vt) : MinMaxVNode(in1, in2, vt) {} + virtual int min_opcode() const { return Op_MinV; } + virtual int max_opcode() const { return Op_MaxV; } virtual int Opcode() const; }; @@ -684,31 +756,33 @@ public: }; // Vector Unsigned Min 
-class UMinVNode : public VectorNode { +class UMinVNode : public MinMaxVNode { public: - UMinVNode(Node* in1, Node* in2, const TypeVect* vt) : VectorNode(in1, in2 ,vt) { + UMinVNode(Node* in1, Node* in2, const TypeVect* vt) : MinMaxVNode(in1, in2, vt) { assert(is_integral_type(vt->element_basic_type()), ""); } - virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); - virtual Node* Identity(PhaseGVN* phase); + virtual int min_opcode() const { return Op_UMinV; } + virtual int max_opcode() const { return Op_UMaxV; } virtual int Opcode() const; }; // Vector Max -class MaxVNode : public VectorNode { +class MaxVNode : public MinMaxVNode { public: - MaxVNode(Node* in1, Node* in2, const TypeVect* vt) : VectorNode(in1, in2, vt) {} + MaxVNode(Node* in1, Node* in2, const TypeVect* vt) : MinMaxVNode(in1, in2, vt) {} + virtual int min_opcode() const { return Op_MinV; } + virtual int max_opcode() const { return Op_MaxV; } virtual int Opcode() const; }; // Vector Unsigned Max -class UMaxVNode : public VectorNode { +class UMaxVNode : public MinMaxVNode { public: - UMaxVNode(Node* in1, Node* in2, const TypeVect* vt) : VectorNode(in1, in2, vt) { + UMaxVNode(Node* in1, Node* in2, const TypeVect* vt) : MinMaxVNode(in1, in2, vt) { assert(is_integral_type(vt->element_basic_type()), ""); } - virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); - virtual Node* Identity(PhaseGVN* phase); + virtual int min_opcode() const { return Op_UMinV; } + virtual int max_opcode() const { return Op_UMaxV; } virtual int Opcode() const; }; @@ -1798,13 +1872,23 @@ class VectorStoreMaskNode : public VectorNode { static VectorStoreMaskNode* make(PhaseGVN& gvn, Node* in, BasicType in_type, uint num_elem); }; -// Lane-wise type cast a vector mask to the given vector type. The vector length -// of the input and output must be the same. +// Lane-wise type cast a vector mask to the given vector type. +// The vector length of the input and output must be the same. 
+// We can only cast between: +// - BVectMask and BVectMask (0x00/0x01) +// - NVectMask and NVectMask (0x0..0/0xF..F of different bit lengths) +// - PVectMask and PVectMask (specialized predicate/mask registers) +// Casting N/PVectMask <-> BVectMask needs to be done by +// VectorStoreMask and VectorLoadMask. class VectorMaskCastNode : public VectorNode { public: VectorMaskCastNode(Node* in, const TypeVect* vt) : VectorNode(in, vt) { const TypeVect* in_vt = in->bottom_type()->is_vect(); assert(in_vt->length() == vt->length(), "vector length must match"); + assert((in_vt->element_basic_type() == T_BOOLEAN) == (vt->element_basic_type() == T_BOOLEAN), + "Cast from/to BVectMask not allowed, use VectorLoadMask/VectorStoreMask instead"); + assert((in_vt->isa_vectmask() == nullptr) == (vt->isa_vectmask() == nullptr), + "Both BVectMask, or both NVectMask, or both PVectMask"); } Node* Identity(PhaseGVN* phase); virtual int Opcode() const; diff --git a/src/hotspot/share/prims/downcallLinker.cpp b/src/hotspot/share/prims/downcallLinker.cpp index 5dde825d75f..cbef9841652 100644 --- a/src/hotspot/share/prims/downcallLinker.cpp +++ b/src/hotspot/share/prims/downcallLinker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,15 +30,33 @@ #include #endif +// keep in synch with jdk.internal.foreign.abi.CapturableState +enum PreservableValues { + NONE = 0, + GET_LAST_ERROR = 1, + WSA_GET_LAST_ERROR = 1 << 1, + ERRNO = 1 << 2 +}; + +// We call this from _thread_in_native, right before a downcall +JVM_LEAF(void, DowncallLinker::capture_state_pre(int32_t* value_ptr, int captured_state_mask)) +#ifdef _WIN64 + if (captured_state_mask & GET_LAST_ERROR) { + SetLastError(*value_ptr); + } + value_ptr++; + if (captured_state_mask & WSA_GET_LAST_ERROR) { + WSASetLastError(*value_ptr); + } + value_ptr++; +#endif + if (captured_state_mask & ERRNO) { + errno = *value_ptr; + } +JVM_END + // We call this from _thread_in_native, right after a downcall -JVM_LEAF(void, DowncallLinker::capture_state(int32_t* value_ptr, int captured_state_mask)) - // keep in synch with jdk.internal.foreign.abi.CapturableState - enum PreservableValues { - NONE = 0, - GET_LAST_ERROR = 1, - WSA_GET_LAST_ERROR = 1 << 1, - ERRNO = 1 << 2 - }; +JVM_LEAF(void, DowncallLinker::capture_state_post(int32_t* value_ptr, int captured_state_mask)) #ifdef _WIN64 if (captured_state_mask & GET_LAST_ERROR) { *value_ptr = GetLastError(); diff --git a/src/hotspot/share/prims/downcallLinker.hpp b/src/hotspot/share/prims/downcallLinker.hpp index 01ee5c56776..519e84281ce 100644 --- a/src/hotspot/share/prims/downcallLinker.hpp +++ b/src/hotspot/share/prims/downcallLinker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,8 +41,9 @@ public: int captured_state_mask, bool needs_transition); - // This is defined as JVM_LEAF which adds the JNICALL modifier.
- static void JNICALL capture_state(int32_t* value_ptr, int captured_state_mask); + // These are defined as JVM_LEAF which adds the JNICALL modifier. + static void JNICALL capture_state_pre(int32_t* value_ptr, int captured_state_mask); + static void JNICALL capture_state_post(int32_t* value_ptr, int captured_state_mask); class StubGenerator : public StubCodeGenerator { BasicType* _signature; @@ -71,7 +72,7 @@ public: bool needs_return_buffer, int captured_state_mask, bool needs_transition) - : StubCodeGenerator(buffer, PrintMethodHandleStubs), + : StubCodeGenerator(buffer, PrintMethodHandleStubs), _signature(signature), _num_args(num_args), _ret_bt(ret_bt), diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index 2297ce9b790..73082c6047d 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2024 Red Hat, Inc. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -867,8 +867,7 @@ static void jni_invoke_static(JNIEnv *env, JavaValue* result, jobject receiver, // Create object to hold arguments for the JavaCall, and associate it with // the jni parser ResourceMark rm(THREAD); - int number_of_parameters = method->size_of_parameters(); - JavaCallArguments java_args(number_of_parameters); + JavaCallArguments java_args(method->size_of_parameters()); assert(method->is_static(), "method should be static"); @@ -3129,16 +3128,21 @@ JNI_END JNI_ENTRY(jobject, jni_GetModule(JNIEnv* env, jclass clazz)) - return Modules::get_module(clazz, THREAD); + HOTSPOT_JNI_GETMODULE_ENTRY(env, clazz); + jobject ret = Modules::get_module(clazz, THREAD); + HOTSPOT_JNI_GETMODULE_RETURN(ret); + return ret; JNI_END JNI_ENTRY(jboolean, jni_IsVirtualThread(JNIEnv* env, jobject obj)) + HOTSPOT_JNI_ISVIRTUALTHREAD_ENTRY(env, obj); + jboolean ret = JNI_FALSE; oop thread_obj = JNIHandles::resolve_external_guard(obj); if (thread_obj != nullptr && thread_obj->is_a(vmClasses::BaseVirtualThread_klass())) { - return JNI_TRUE; - } else { - return JNI_FALSE; + ret = JNI_TRUE; } + HOTSPOT_JNI_ISVIRTUALTHREAD_RETURN(ret); + return ret; JNI_END diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp index 401bb4dfdb8..886c2da1dee 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -1529,7 +1529,7 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec GrowableArray* wantList = nullptr; ObjectMonitor* mon = mark.has_monitor() - ? ObjectSynchronizer::read_monitor(current_thread, hobj(), mark) + ? 
ObjectSynchronizer::read_monitor(hobj(), mark) : nullptr; if (mon != nullptr) { diff --git a/src/hotspot/share/prims/jvmtiEventController.cpp b/src/hotspot/share/prims/jvmtiEventController.cpp index cb44b833c48..832c8a33c88 100644 --- a/src/hotspot/share/prims/jvmtiEventController.cpp +++ b/src/hotspot/share/prims/jvmtiEventController.cpp @@ -544,6 +544,11 @@ JvmtiEventControllerPrivate::recompute_env_thread_enabled(JvmtiEnvThreadState* e } switch (JvmtiEnv::get_phase()) { + case JVMTI_PHASE_ONLOAD: + case JVMTI_PHASE_PRIMORDIAL: + case JVMTI_PHASE_START: + now_enabled &= EARLY_EVENT_BITS; + break; case JVMTI_PHASE_DEAD: // no events allowed when dead now_enabled = 0; diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp index 13b239b4df0..8beddc5d406 100644 --- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp +++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp @@ -53,6 +53,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "oops/recordComponent.hpp" +#include "oops/trainingData.hpp" #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiRedefineClasses.hpp" #include "prims/jvmtiThreadState.inline.hpp" @@ -274,6 +275,10 @@ void VM_RedefineClasses::doit() { redefine_single_class(current, _class_defs[i].klass, _scratch_classes[i]); } +#if INCLUDE_CDS + TrainingData::cleanup_after_redefinition(); +#endif + // Flush all compiled code that depends on the classes redefined. 
flush_dependent_code(); @@ -1481,6 +1486,8 @@ jvmtiError VM_RedefineClasses::load_new_class_versions() { } else { return JVMTI_ERROR_INTERNAL; } + } else if (res != JVMTI_ERROR_NONE) { + return res; } #ifdef ASSERT @@ -2045,7 +2052,7 @@ bool VM_RedefineClasses::rewrite_cp_refs_in_record_attribute(InstanceKlass* scra AnnotationArray* type_annotations = component->type_annotations(); if (type_annotations != nullptr && type_annotations->length() != 0) { int byte_i = 0; // byte index into annotations - if (!rewrite_cp_refs_in_annotations_typeArray(type_annotations, byte_i)) { + if (!rewrite_cp_refs_in_type_annotations_typeArray(type_annotations, byte_i, "record_info")) { log_debug(redefine, class, annotation)("bad record_component_type_annotations at %d", i); // propagate failure back to caller return false; diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp index 584f077eddc..03cb98d8e75 100644 --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -157,6 +157,19 @@ int MethodHandles::ref_kind_to_flags(int ref_kind) { return flags; } +#ifdef ASSERT +const char* MethodHandles::ref_kind_to_verify_msg(int ref_kind) { + switch (ref_kind) { + case JVM_REF_invokeSpecial: return "verify_ref_kind expected invokeSpecial"; + case JVM_REF_invokeStatic: return "verify_ref_kind expected invokeStatic"; + case JVM_REF_invokeVirtual: return "verify_ref_kind expected invokeVirtual"; + case JVM_REF_invokeInterface: return "verify_ref_kind expected invokeInterface"; + default: assert(false, "unexpected ref_kind: %d", ref_kind); + } + return ""; +} +#endif + Handle MethodHandles::resolve_MemberName_type(Handle mname, Klass* caller, TRAPS) { Handle empty; Handle type(THREAD, java_lang_invoke_MemberName::type(mname())); diff --git a/src/hotspot/share/prims/methodHandles.hpp b/src/hotspot/share/prims/methodHandles.hpp index 73da28a6cf5..a2a549fe051 100644 --- 
a/src/hotspot/share/prims/methodHandles.hpp +++ b/src/hotspot/share/prims/methodHandles.hpp @@ -182,6 +182,8 @@ public: static int ref_kind_to_flags(int ref_kind); + DEBUG_ONLY( static const char* ref_kind_to_verify_msg(int ref_kind); ) + #include CPU_HEADER(methodHandles) // Tracing diff --git a/src/hotspot/share/prims/vectorSupport.cpp b/src/hotspot/share/prims/vectorSupport.cpp index 58f22d38d33..5c6010acdf1 100644 --- a/src/hotspot/share/prims/vectorSupport.cpp +++ b/src/hotspot/share/prims/vectorSupport.cpp @@ -200,7 +200,6 @@ bool VectorSupport::is_unsigned_op(jint id) { } const char* VectorSupport::lanetype2name(LaneType lane_type) { - assert(lane_type >= LT_FLOAT && lane_type <= LT_LONG, ""); const char* lanetype2name[] = { "float", "double", @@ -209,7 +208,11 @@ const char* VectorSupport::lanetype2name(LaneType lane_type) { "int", "long" }; - return lanetype2name[lane_type]; + if (lane_type >= LT_FLOAT && lane_type <= LT_LONG) { + return lanetype2name[lane_type]; + } + assert(false, "unknown lane type: %d", (int)lane_type); + return "illegal"; } int VectorSupport::vop2ideal(jint id, LaneType lt) { @@ -223,7 +226,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_AddL; case LT_FLOAT: return Op_AddF; case LT_DOUBLE: return Op_AddD; - default: fatal("ADD: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -235,7 +238,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_SubL; case LT_FLOAT: return Op_SubF; case LT_DOUBLE: return Op_SubD; - default: fatal("SUB: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -247,7 +250,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_MulL; case LT_FLOAT: return Op_MulF; case LT_DOUBLE: return Op_MulD; - default: fatal("MUL: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -259,7 +262,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_DivL; case LT_FLOAT: return Op_DivF; 
case LT_DOUBLE: return Op_DivD; - default: fatal("DIV: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -271,7 +274,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_MinL; case LT_FLOAT: return Op_MinF; case LT_DOUBLE: return Op_MinD; - default: fatal("MIN: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -283,7 +286,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_MaxL; case LT_FLOAT: return Op_MaxF; case LT_DOUBLE: return Op_MaxD; - default: fatal("MAX: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -293,7 +296,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: case LT_INT: case LT_LONG: return Op_UMinV; - default: fatal("MIN: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -303,7 +306,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: case LT_INT: case LT_LONG: return Op_UMaxV; - default: fatal("MAX: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -315,7 +318,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_AbsL; case LT_FLOAT: return Op_AbsF; case LT_DOUBLE: return Op_AbsD; - default: fatal("ABS: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -327,7 +330,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: return Op_NegL; case LT_FLOAT: return Op_NegF; case LT_DOUBLE: return Op_NegD; - default: fatal("NEG: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -337,7 +340,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: return Op_AndI; case LT_LONG: return Op_AndL; - default: fatal("AND: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -347,7 +350,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: return Op_OrI; case LT_LONG: return Op_OrL; - default: fatal("OR: %s", lanetype2name(lt)); + default: return 0; } 
break; } @@ -357,7 +360,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: return Op_XorI; case LT_LONG: return Op_XorL; - default: fatal("XOR: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -365,7 +368,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { switch (lt) { case LT_FLOAT: return Op_SqrtF; case LT_DOUBLE: return Op_SqrtD; - default: fatal("SQRT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -373,7 +376,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { switch (lt) { case LT_FLOAT: return Op_FmaF; case LT_DOUBLE: return Op_FmaD; - default: fatal("FMA: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -383,7 +386,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: return Op_LShiftI; case LT_LONG: return Op_LShiftL; - default: fatal("LSHIFT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -393,7 +396,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: return Op_RShiftI; case LT_LONG: return Op_RShiftL; - default: fatal("RSHIFT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -403,7 +406,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: return Op_URShiftS; case LT_INT: return Op_URShiftI; case LT_LONG: return Op_URShiftL; - default: fatal("URSHIFT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -413,7 +416,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: // fall-through case LT_LONG: return Op_RotateLeft; - default: fatal("LROTATE: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -423,7 +426,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: // fall-through case LT_LONG: return Op_RotateRight; - default: fatal("RROTATE: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -435,7 
+438,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_VectorMaskLastTrue; - default: fatal("MASK_LASTTRUE: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -447,7 +450,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_VectorMaskFirstTrue; - default: fatal("MASK_FIRSTTRUE: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -459,7 +462,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_VectorMaskTrueCount; - default: fatal("MASK_TRUECOUNT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -471,7 +474,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_VectorMaskToLong; - default: fatal("MASK_TOLONG: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -483,7 +486,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_ExpandV; - default: fatal("EXPAND: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -495,7 +498,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_CompressV; - default: fatal("COMPRESS: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -507,7 +510,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_LONG: // fall-through case LT_FLOAT: // fall-through case LT_DOUBLE: return Op_CompressM; - default: fatal("MASK_COMPRESS: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -517,7 +520,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // for byte and short types temporarily case LT_INT: return 
Op_PopCountI; case LT_LONG: return Op_PopCountL; - default: fatal("BILT_COUNT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -527,7 +530,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: case LT_INT: return Op_CountTrailingZerosI; case LT_LONG: return Op_CountTrailingZerosL; - default: fatal("TZ_COUNT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -537,7 +540,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: case LT_INT: return Op_CountLeadingZerosI; case LT_LONG: return Op_CountLeadingZerosL; - default: fatal("LZ_COUNT: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -547,7 +550,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // Op_ReverseI for byte and short case LT_INT: return Op_ReverseI; case LT_LONG: return Op_ReverseL; - default: fatal("REVERSE: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -562,7 +565,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_BYTE: // Intentionally fall-through case LT_INT: return Op_ReverseBytesI; case LT_LONG: return Op_ReverseBytesL; - default: fatal("REVERSE_BYTES: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -573,7 +576,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: // fall-through case LT_LONG: return Op_SaturatingAddV; - default: fatal("S[U]ADD: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -584,7 +587,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case LT_SHORT: // fall-through case LT_INT: // fall-through case LT_LONG: return Op_SaturatingSubV; - default: fatal("S[U}SUB: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -592,7 +595,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { switch (lt) { case LT_INT: case LT_LONG: return Op_CompressBits; - default: fatal("COMPRESS_BITS: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -600,7 +603,7 @@ int 
VectorSupport::vop2ideal(jint id, LaneType lt) { switch (lt) { case LT_INT: case LT_LONG: return Op_ExpandBits; - default: fatal("EXPAND_BITS: %s", lanetype2name(lt)); + default: return 0; } break; } @@ -624,7 +627,7 @@ int VectorSupport::vop2ideal(jint id, LaneType lt) { case VECTOR_OP_EXPM1: // fall-through case VECTOR_OP_HYPOT: return 0; // not supported; should be handled in Java code - default: fatal("unknown op: %d", vop); + default: return 0; } return 0; // Unimplemented } diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 1a440584fe1..de140fb95ff 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -107,6 +107,7 @@ #if INCLUDE_G1GC #include "gc/g1/g1Arguments.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectorState.inline.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1HeapRegionManager.hpp" @@ -333,7 +334,6 @@ WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o)) WB_END WB_ENTRY(void, WB_DecodeNKlassAndAccessKlass(JNIEnv* env, jobject o, jint nKlass)) - assert(UseCompressedClassPointers, "Should only call for UseCompressedClassPointers"); const narrowKlass nk = (narrowKlass)nKlass; const Klass* const k = CompressedKlassPointers::decode_not_null_without_asserts(nKlass); printf("WB_DecodeNKlassAndAccessKlass: nk %u k " PTR_FORMAT "\n", nk, p2i(k)); @@ -578,7 +578,7 @@ WB_END WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o)) if (UseG1GC) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - return g1h->concurrent_mark()->in_progress(); + return g1h->collector_state()->is_in_concurrent_cycle(); } THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1InConcurrentMark: G1 GC is not enabled"); WB_END @@ -1280,8 +1280,8 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method)) if (mdo != nullptr) { mdo->init(); ResourceMark 
rm(THREAD); - int arg_count = mdo->method()->size_of_parameters(); - for (int i = 0; i < arg_count; i++) { + int arg_size = mdo->method()->size_of_parameters(); + for (int i = 0; i < arg_size; i++) { mdo->set_arg_modified(i, 0); } mdo->clean_method_data(/*always_clean*/true); diff --git a/src/hotspot/share/runtime/abstract_vm_version.cpp b/src/hotspot/share/runtime/abstract_vm_version.cpp index 4051ba3f9d6..37c5815f60e 100644 --- a/src/hotspot/share/runtime/abstract_vm_version.cpp +++ b/src/hotspot/share/runtime/abstract_vm_version.cpp @@ -152,28 +152,14 @@ const char* Abstract_VM_Version::vm_info_string() { } case Arguments::_mixed: if (is_vm_statically_linked()) { - if (CompilationModeFlag::quick_only()) { - return CDSConfig::is_using_archive() ? "mixed mode, emulated-client, static, sharing" : "mixed mode, emulated-client, static"; - } else { - return CDSConfig::is_using_archive() ? "mixed mode, static, sharing" : "mixed mode, static"; - } + return CDSConfig::is_using_archive() ? "mixed mode, static, sharing" : "mixed mode, static"; } else { - if (CompilationModeFlag::quick_only()) { - return CDSConfig::is_using_archive() ? "mixed mode, emulated-client, sharing" : "mixed mode, emulated-client"; - } else { - return CDSConfig::is_using_archive() ? "mixed mode, sharing" : "mixed mode"; - } + return CDSConfig::is_using_archive() ? "mixed mode, sharing" : "mixed mode"; } case Arguments::_comp: if (is_vm_statically_linked()) { - if (CompilationModeFlag::quick_only()) { - return CDSConfig::is_using_archive() ? "compiled mode, emulated-client, static, sharing" : "compiled mode, emulated-client, static"; - } return CDSConfig::is_using_archive() ? "compiled mode, static, sharing" : "compiled mode, static"; } else { - if (CompilationModeFlag::quick_only()) { - return CDSConfig::is_using_archive() ? "compiled mode, emulated-client, sharing" : "compiled mode, emulated-client"; - } return CDSConfig::is_using_archive() ? 
"compiled mode, sharing" : "compiled mode"; } } diff --git a/src/hotspot/share/runtime/abstract_vm_version.hpp b/src/hotspot/share/runtime/abstract_vm_version.hpp index 17ade2c068d..794fa4dabcf 100644 --- a/src/hotspot/share/runtime/abstract_vm_version.hpp +++ b/src/hotspot/share/runtime/abstract_vm_version.hpp @@ -45,6 +45,21 @@ class outputStream; class stringStream; enum class vmIntrinsicID; +// Helper macro to test and set VM flag and corresponding cpu feature +#define CHECK_CPU_FEATURE(feature_test_fn, feature) \ + if (feature_test_fn()) { \ + if (FLAG_IS_DEFAULT(Use##feature)) { \ + FLAG_SET_DEFAULT(Use##feature, true); \ + } else if (!Use##feature) { \ + clear_feature(CPU_##feature); \ + } \ + } else if (Use##feature) { \ + if (!FLAG_IS_DEFAULT(Use##feature)) { \ + warning(#feature " instructions are not available on this CPU"); \ + } \ + FLAG_SET_DEFAULT(Use##feature, false); \ + } + // Abstract_VM_Version provides information about the VM. class Abstract_VM_Version: AllStatic { diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index a1dae76f680..2d40ee1822a 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -533,12 +533,7 @@ static SpecialFlag const special_jvm_flags[] = { { "DynamicDumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, { "RequireSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, { "UseSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, -#ifdef _LP64 - { "UseCompressedClassPointers", JDK_Version::jdk(25), JDK_Version::jdk(27), JDK_Version::undefined() }, -#endif { "AggressiveHeap", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, - { "NeverActAsServerClassMachine", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, - { "AlwaysActAsServerClassMachine", JDK_Version::jdk(26), JDK_Version::jdk(27), 
JDK_Version::jdk(28) }, // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in: { "CreateMinidumpOnCrash", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() }, @@ -548,11 +543,19 @@ static SpecialFlag const special_jvm_flags[] = { #if defined(AARCH64) { "NearCpool", JDK_Version::undefined(), JDK_Version::jdk(25), JDK_Version::undefined() }, #endif +#ifdef _LP64 + { "UseCompressedClassPointers", JDK_Version::jdk(25), JDK_Version::jdk(27), JDK_Version::undefined() }, +#endif { "PSChunkLargeArrays", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, { "ParallelRefProcEnabled", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, { "ParallelRefProcBalancingEnabled", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, { "MaxRAM", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, + { "NewSizeThreadIncrease", JDK_Version::undefined(), JDK_Version::jdk(27), JDK_Version::jdk(28) }, + { "NeverActAsServerClassMachine", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, + { "AlwaysActAsServerClassMachine", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) }, + { "UseXMMForArrayCopy", JDK_Version::undefined(), JDK_Version::jdk(27), JDK_Version::jdk(28) }, + { "UseNewLongLShift", JDK_Version::undefined(), JDK_Version::jdk(27), JDK_Version::jdk(28) }, #ifdef ASSERT { "DummyObsoleteTestFlag", JDK_Version::undefined(), JDK_Version::jdk(18), JDK_Version::undefined() }, @@ -1512,10 +1515,7 @@ void Arguments::set_heap_size() { !FLAG_IS_DEFAULT(MinRAMPercentage) || !FLAG_IS_DEFAULT(InitialRAMPercentage); - // Limit the available memory if client emulation mode is enabled. - const size_t avail_mem = CompilerConfig::should_set_client_emulation_mode_flags() - ? 
1ULL*G - : os::physical_memory(); + const physical_memory_size_type avail_mem = os::physical_memory(); // If the maximum heap size has not been set with -Xmx, then set it as // fraction of the size of physical memory, respecting the maximum and @@ -1554,7 +1554,7 @@ void Arguments::set_heap_size() { } #ifdef _LP64 - if (UseCompressedOops || UseCompressedClassPointers) { + if (UseCompressedOops) { // HeapBaseMinAddress can be greater than default but not less than. if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) { if (HeapBaseMinAddress < DefaultHeapBaseMinAddress) { @@ -1567,9 +1567,7 @@ void Arguments::set_heap_size() { FLAG_SET_ERGO(HeapBaseMinAddress, DefaultHeapBaseMinAddress); } } - } - if (UseCompressedOops) { uintptr_t heap_end = HeapBaseMinAddress + MaxHeapSize; uintptr_t max_coop_heap = max_heap_for_compressed_oops(); @@ -3782,10 +3780,6 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { void Arguments::set_compact_headers_flags() { #ifdef _LP64 - if (UseCompactObjectHeaders && FLAG_IS_CMDLINE(UseCompressedClassPointers) && !UseCompressedClassPointers) { - warning("Compact object headers require compressed class pointers. Disabling compact object headers."); - FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); - } if (UseCompactObjectHeaders && !UseObjectMonitorTable) { // If UseCompactObjectHeaders is on the command line, turn on UseObjectMonitorTable. 
if (FLAG_IS_CMDLINE(UseCompactObjectHeaders)) { @@ -3799,9 +3793,6 @@ void Arguments::set_compact_headers_flags() { FLAG_SET_DEFAULT(UseObjectMonitorTable, true); } } - if (UseCompactObjectHeaders && !UseCompressedClassPointers) { - FLAG_SET_DEFAULT(UseCompressedClassPointers, true); - } #endif } @@ -3817,9 +3808,7 @@ jint Arguments::apply_ergo() { set_compact_headers_flags(); - if (UseCompressedClassPointers) { - CompressedKlassPointers::pre_initialize(); - } + CompressedKlassPointers::pre_initialize(); CDSConfig::ergo_initialize(); @@ -3864,10 +3853,6 @@ jint Arguments::apply_ergo() { DebugNonSafepoints = true; } - if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) { - warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used"); - } - // Treat the odd case where local verification is enabled but remote // verification is not as if both were enabled. if (BytecodeVerificationLocal && !BytecodeVerificationRemote) { diff --git a/src/hotspot/share/runtime/atomicAccess.hpp b/src/hotspot/share/runtime/atomicAccess.hpp index c9a2dfb9383..46330cffdb2 100644 --- a/src/hotspot/share/runtime/atomicAccess.hpp +++ b/src/hotspot/share/runtime/atomicAccess.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_RUNTIME_ATOMICACCESS_HPP #include "cppstdlib/type_traits.hpp" -#include "memory/allocation.hpp" +#include "memory/allStatic.hpp" #include "metaprogramming/enableIf.hpp" #include "metaprogramming/primitiveConversions.hpp" #include "runtime/orderAccess.hpp" @@ -829,7 +829,7 @@ class AtomicAccess::PlatformBitops {}; template -class ScopedFenceGeneral: public StackObj { +class ScopedFenceGeneral { public: void prefix() {} void postfix() {} diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 54a65358693..cfdcf52a5eb 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -1686,7 +1686,7 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArrayowner()->is_locked(), "object must be locked now"); assert(obj->mark().has_monitor(), "must be"); assert(!deoptee_thread->lock_stack().contains(obj()), "must be"); - assert(ObjectSynchronizer::read_monitor(thread, obj(), obj->mark())->has_owner(deoptee_thread), "must be"); + assert(ObjectSynchronizer::read_monitor(obj(), obj->mark())->has_owner(deoptee_thread), "must be"); } } } @@ -2021,7 +2021,7 @@ static void post_deoptimization_event(nmethod* nm, #endif // INCLUDE_JFR static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci, - const char* reason_name, const char* reason_action) { + const char* reason_name, const char* reason_action, const char* class_name) { LogTarget(Debug, deoptimization) lt; if (lt.is_enabled()) { LogStream ls(lt); @@ -2035,6 +2035,9 @@ static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_ } ls.print("%s ", reason_name); ls.print("%s ", reason_action); + if (class_name != nullptr) { + ls.print("%s ", class_name); + } ls.print_cr("pc=" INTPTR_FORMAT " relative_pc=" INTPTR_FORMAT, pc, fr.pc() - nm->code_begin()); } @@ -2135,6 
+2138,17 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr MethodData* trap_mdo = get_method_data(current, profiled_method, create_if_missing); + Symbol* class_name = nullptr; + bool unresolved = false; + if (unloaded_class_index >= 0) { + constantPoolHandle constants (current, trap_method->constants()); + if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { + class_name = constants->klass_name_at(unloaded_class_index); + unresolved = true; + } else if (constants->tag_at(unloaded_class_index).is_symbol()) { + class_name = constants->symbol_at(unloaded_class_index); + } + } { // Log Deoptimization event for JFR, UL and event system Method* tm = trap_method(); const char* reason_name = trap_reason_name(reason); @@ -2142,10 +2156,24 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr intptr_t pc = p2i(fr.pc()); JFR_ONLY(post_deoptimization_event(nm, tm, trap_bci, trap_bc, reason, action);) - log_deopt(nm, tm, pc, fr, trap_bci, reason_name, reason_action); - Events::log_deopt_message(current, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s", + + ResourceMark rm; + + const char* class_name_str = nullptr; + const char* class_name_msg = nullptr; + stringStream st, stm; + if (class_name != nullptr) { + class_name->print_symbol_on(&st); + class_name_str = st.freeze(); + stm.print("class=%s ", class_name_str); + class_name_msg = stm.freeze(); + } else { + class_name_msg = ""; + } + log_deopt(nm, tm, pc, fr, trap_bci, reason_name, reason_action, class_name_str); + Events::log_deopt_message(current, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s%s", reason_name, reason_action, pc, - tm->name_and_sig_as_C_string(), trap_bci, nm->compiler_name()); + tm->name_and_sig_as_C_string(), trap_bci, class_name_msg, nm->compiler_name()); } // Print a bunch of diagnostics, if requested. 
@@ -2173,20 +2201,13 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr #endif nm->log_identity(xtty); } - Symbol* class_name = nullptr; - bool unresolved = false; - if (unloaded_class_index >= 0) { - constantPoolHandle constants (current, trap_method->constants()); - if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { - class_name = constants->klass_name_at(unloaded_class_index); - unresolved = true; - if (xtty != nullptr) + if (class_name != nullptr) { + if (xtty != nullptr) { + if (unresolved) { xtty->print(" unresolved='1'"); - } else if (constants->tag_at(unloaded_class_index).is_symbol()) { - class_name = constants->symbol_at(unloaded_class_index); - } - if (xtty != nullptr) + } xtty->name(class_name); + } } if (xtty != nullptr && trap_mdo != nullptr && (int)reason < (int)MethodData::_trap_hist_limit) { // Dump the relevant MDO state. diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp index 444ce321759..36eece6f013 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp @@ -274,6 +274,17 @@ JVMFlag::Error AVX3ThresholdConstraintFunc(int value, bool verbose) { return JVMFlag::SUCCESS; } +JVMFlag::Error CopyAVX3ThresholdConstraintFunc(int value, bool verbose) { + if (value != 0 && !is_power_of_2(value)) { + JVMFlag::printError(verbose, + "CopyAVX3Threshold ( %d ) must be 0 or " + "a power of two value between 0 and MAX_INT\n", value); + return JVMFlag::VIOLATES_CONSTRAINT; + } + + return JVMFlag::SUCCESS; +} + JVMFlag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose) { if (value >= 4032) { JVMFlag::printError(verbose, diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp index cf785800cfc..45e91058e0b 100644 --- 
a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp +++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp @@ -46,6 +46,7 @@ f(uintx, ArraycopyDstPrefetchDistanceConstraintFunc) \ f(uintx, ArraycopySrcPrefetchDistanceConstraintFunc) \ f(int, AVX3ThresholdConstraintFunc) \ + f(int, CopyAVX3ThresholdConstraintFunc) \ f(uint, TypeProfileLevelConstraintFunc) \ f(uint, VerifyIterativeGVNConstraintFunc) \ f(intx, InitArrayShortSizeConstraintFunc) \ diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp index 1e6efd893c8..1f16fada239 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp @@ -153,6 +153,20 @@ JVMFlag::Error OnSpinWaitInstNameConstraintFunc(ccstr value, bool verbose) { return JVMFlag::VIOLATES_CONSTRAINT; } +#ifdef LINUX + if (strcmp(value, "wfet") == 0) { + if (UnlockExperimentalVMOptions) { + return JVMFlag::SUCCESS; + } else { + JVMFlag::printError(verbose, + "'wfet' value for OnSpinWaitInst is experimental and " + "must be enabled via -XX:+UnlockExperimentalVMOptions.\n" + "Error: The unlock option must precede 'OnSpinWaitInst'.\n"); + return JVMFlag::VIOLATES_CONSTRAINT; + } + } +#endif + if (strcmp(value, "nop") != 0 && strcmp(value, "isb") != 0 && strcmp(value, "yield") != 0 && @@ -160,7 +174,7 @@ JVMFlag::Error OnSpinWaitInstNameConstraintFunc(ccstr value, bool verbose) { strcmp(value, "none") != 0) { JVMFlag::printError(verbose, "Unrecognized value %s for OnSpinWaitInst. 
Must be one of the following: " - "nop, isb, yield, sb, none\n", + "nop, isb, yield, sb," LINUX_ONLY(" wfet,") " none\n", value); return JVMFlag::VIOLATES_CONSTRAINT; } diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index 8f969600ba8..d691a3c8028 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -1286,7 +1286,7 @@ public: } bool is_good(oop* p) { - return *p == nullptr || (dbg_is_safe(*p, -1) && dbg_is_safe((*p)->klass(), -1) && oopDesc::is_oop_or_null(*p)); + return *p == nullptr || (dbg_is_safe(*p, -1) && dbg_is_safe((*p)->klass_without_asserts(), -1) && oopDesc::is_oop_or_null(*p)); } void describe(FrameValues& values, int frame_no) { for (int i = 0; i < _oops->length(); i++) { diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 6d4b9908e1c..9d38a44cbd5 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -199,19 +199,6 @@ const int ObjectAlignmentInBytes = 8; "Granularity to use for NUMA interleaving on Windows OS") \ constraint(NUMAInterleaveGranularityConstraintFunc, AtParse) \ \ - product(uintx, NUMAChunkResizeWeight, 20, \ - "Percentage (0-100) used to weight the current sample when " \ - "computing exponentially decaying average for " \ - "AdaptiveNUMAChunkSizing") \ - range(0, 100) \ - \ - product(size_t, NUMASpaceResizeRate, 1*G, \ - "Do not reallocate more than this amount per collection") \ - range(0, max_uintx) \ - \ - product(bool, UseAdaptiveNUMAChunkSizing, true, \ - "Enable adaptive chunk sizing for NUMA") \ - \ product(bool, NUMAStats, false, \ "Print NUMA stats in detailed heap information") \ \ @@ -811,9 +798,6 @@ const int ObjectAlignmentInBytes = 8; "Number of OutOfMemoryErrors preallocated with backtrace") \ range(0, 1024) \ \ - product(bool, UseXMMForArrayCopy, false, \ - "Use SSE2 MOVQ instruction for Arraycopy") \ - \ develop(bool, PrintFieldLayout, false, \ "Print 
field layout for each class") \ \ @@ -884,20 +868,11 @@ const int ObjectAlignmentInBytes = 8; develop(bool, VerifyDependencies, trueInDebug, \ "Exercise and verify the compilation dependency mechanism") \ \ - develop(bool, TraceNewOopMapGeneration, false, \ - "Trace OopMapGeneration") \ - \ - develop(bool, TraceNewOopMapGenerationDetailed, false, \ - "Trace OopMapGeneration: print detailed cell states") \ - \ develop(bool, TimeOopMap, false, \ "Time calls to GenerateOopMap::compute_map() in sum") \ \ - develop(bool, TimeOopMap2, false, \ - "Time calls to GenerateOopMap::compute_map() individually") \ - \ - develop(bool, TraceOopMapRewrites, false, \ - "Trace rewriting of methods during oop map generation") \ + develop(bool, GenerateOopMapALot, false, \ + "Generate interpreter oopmaps at all safepoints") \ \ develop(bool, TraceFinalizerRegistration, false, \ "Trace registration of final references") \ @@ -1396,9 +1371,6 @@ const int ObjectAlignmentInBytes = 8; "Maximum size of Metaspaces (in bytes)") \ constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo) \ \ - product(bool, UseCompressedClassPointers, true, \ - "(Deprecated) Use 32-bit class pointers.") \ - \ product(size_t, CompressedClassSpaceSize, 1*G, \ "Maximum size of class area in Metaspace when compressed " \ "class pointers are used") \ @@ -1545,6 +1517,10 @@ const int ObjectAlignmentInBytes = 8; "Size of code heap with non-nmethods (in bytes)") \ constraint(VMPageSizeConstraintFunc, AtParse) \ \ + product(size_t, HotCodeHeapSize, 0, EXPERIMENTAL, \ + "Size of code heap with predicted hot methods (in bytes)") \ + range(0, SIZE_MAX) \ + \ product_pd(size_t, CodeCacheExpansionSize, \ "Code cache expansion size (in bytes)") \ range(32*K, SIZE_MAX) \ @@ -1957,7 +1933,7 @@ const int ObjectAlignmentInBytes = 8; "Mark all threads after a safepoint, and clear on a modify " \ "fence. 
Add cleanliness checks.") \ \ - product(bool, UseObjectMonitorTable, false, DIAGNOSTIC, \ + product(bool, UseObjectMonitorTable, true, DIAGNOSTIC, \ "Use a table to record inflated monitors rather than the first " \ "word of the object.") \ \ diff --git a/src/hotspot/share/runtime/hotCodeCollector.cpp b/src/hotspot/share/runtime/hotCodeCollector.cpp new file mode 100644 index 00000000000..6bdeee011ce --- /dev/null +++ b/src/hotspot/share/runtime/hotCodeCollector.cpp @@ -0,0 +1,259 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifdef COMPILER2 + +#include "code/codeCache.hpp" +#include "code/compiledIC.hpp" +#include "compiler/compilerDefinitions.inline.hpp" +#include "logging/log.hpp" +#include "memory/resourceArea.hpp" +#include "oops/method.inline.hpp" +#include "runtime/hotCodeCollector.hpp" +#include "runtime/hotCodeSampler.hpp" +#include "runtime/java.hpp" +#include "runtime/javaThread.inline.hpp" + +// Initialize static variables +bool HotCodeCollector::_is_initialized = false; +int HotCodeCollector::_new_c2_nmethods_count = 0; +int HotCodeCollector::_total_c2_nmethods_count = 0; + +HotCodeCollector::HotCodeCollector() : JavaThread(thread_entry) {} + +void HotCodeCollector::initialize() { + EXCEPTION_MARK; + + assert(HotCodeHeap, "HotCodeCollector requires HotCodeHeap enabled"); + assert(CompilerConfig::is_c2_enabled(), "HotCodeCollector requires C2 enabled"); + assert(NMethodRelocation, "HotCodeCollector requires NMethodRelocation enabled"); + assert(HotCodeHeapSize > 0, "HotCodeHeapSize must be non-zero to use HotCodeCollector"); + assert(CodeCache::get_code_heap(CodeBlobType::MethodHot) != nullptr, "MethodHot code heap not found"); + + Handle thread_oop = JavaThread::create_system_thread_object("HotCodeCollectorThread", CHECK); + HotCodeCollector* thread = new HotCodeCollector(); + JavaThread::vm_exit_on_osthread_failure(thread); + JavaThread::start_internal_daemon(THREAD, thread, thread_oop, NormPriority); + + _is_initialized = true; +} + +bool HotCodeCollector::is_nmethod_count_stable() { + if (HotCodeStablePercent < 0) { + log_info(hotcode)("HotCodeStablePercent is less than zero, stable check disabled"); + return true; + } + + MutexLocker ml_CodeCache_lock(CodeCache_lock, Mutex::_no_safepoint_check_flag); + + if (_total_c2_nmethods_count <= 0) { + log_info(hotcode)("No registered C2 nmethods"); + return false; + } + + const double percent_new = 100.0 * _new_c2_nmethods_count / _total_c2_nmethods_count; + bool is_stable_nmethod_count = percent_new <= 
HotCodeStablePercent; + + log_info(hotcode)("C2 nmethod count %s", is_stable_nmethod_count ? "stable" : "not stable"); + log_debug(hotcode)("C2 nmethod stats: New: %d, Total: %d, Percent new: %f", _new_c2_nmethods_count, _total_c2_nmethods_count, percent_new); + + _new_c2_nmethods_count = 0; + + return is_stable_nmethod_count; +} + +void HotCodeCollector::thread_entry(JavaThread* thread, TRAPS) { + // Initial sleep to allow JVM to warm up + thread->sleep(HotCodeStartupDelaySeconds * 1000); + + while (true) { + ResourceMark rm; + + // Sample application and group hot nmethods if nmethod count is stable + if (is_nmethod_count_stable()) { + log_info(hotcode)("Sampling..."); + + ThreadSampler sampler; + uint64_t start_time = os::javaTimeMillis(); + while (os::javaTimeMillis() - start_time <= HotCodeSampleSeconds * 1000) { + sampler.sample_all_java_threads(); + thread->sleep(rand_sampling_period_ms()); + } + + Candidates candidates(sampler); + do_grouping(candidates); + } + + thread->sleep(HotCodeIntervalSeconds * 1000); + } +} + +void HotCodeCollector::do_grouping(Candidates& candidates) { + int num_relocated = 0; + + // Sort nmethods by increasing sample count so pop() returns the hottest + candidates.sort(); + + while (candidates.has_candidates()) { + + double percent_from_hot = candidates.get_hot_sample_percent(); + log_debug(hotcode)("Percentage of samples from hot code heap: %f", percent_from_hot); + if (percent_from_hot >= HotCodeSamplePercent) { + log_info(hotcode)("Percentage of samples from hot nmethods over threshold. Done collecting hot code"); + break; + } + + nmethod* candidate = candidates.get_candidate(); + + MutexLocker ml_Compile_lock(Compile_lock); + MutexLocker ml_CompiledIC_lock(CompiledIC_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml_CodeCache_lock(CodeCache_lock, Mutex::_no_safepoint_check_flag); + + num_relocated += do_relocation(candidate, 0); + } + + log_info(hotcode)("Collection done. 
Relocated %d nmethods to the MethodHot heap", num_relocated); +} + +int HotCodeCollector::do_relocation(void* candidate, uint call_level) { + if (candidate == nullptr) { + return 0; + } + + // Verify that address still points to CodeBlob + CodeBlob* blob = CodeCache::find_blob(candidate); + if (blob == nullptr) { + return 0; + } + + // Verify that blob is nmethod + nmethod* nm = blob->as_nmethod_or_null(); + if (nm == nullptr || nm->method() == nullptr) { + return 0; + } + + // The candidate may have been recompiled or already relocated. + // Retrieve the latest nmethod from the Method + nm = nm->method()->code(); + + // Verify the nmethod is still valid for relocation + if (nm == nullptr || !nm->is_in_use() || !nm->is_compiled_by_c2()) { + return 0; + } + + // Verify code heap has space + if (CodeCache::get_code_heap(CodeBlobType::MethodHot)->unallocated_capacity() < (size_t)nm->size()) { + log_info(hotcode)("Not enough free space in MethodHot heap (%zd bytes) to relocate nm (%d bytes). Bailing out", + CodeCache::get_code_heap(CodeBlobType::MethodHot)->unallocated_capacity(), nm->size()); + return 0; + } + + // Number of nmethods relocated (candidate + callees) + int num_relocated = 0; + + // Pointer to nmethod in hot heap + nmethod* hot_nm = nullptr; + + if (CodeCache::get_code_blob_type(nm) != CodeBlobType::MethodHot) { + CompiledICLocker ic_locker(nm); + hot_nm = nm->relocate(CodeBlobType::MethodHot); + + if (hot_nm != nullptr) { + // Successfully relocated nmethod. Update counts and proceed to callee relocation. 
+ log_debug(hotcode)("Successful relocation: nmethod (%p), method (%s), call level (%d)", nm, hot_nm->method()->name_and_sig_as_C_string(), call_level); + num_relocated++; + } else { + // Relocation failed so return and do not attempt to relocate callees + log_debug(hotcode)("Failed relocation: nmethod (%p), call level (%d)", nm, call_level); + return 0; + } + } else { + // Skip relocation since already in hot heap, but still relocate callees + // since they may not have been compiled when this method was first relocated + log_debug(hotcode)("Already relocated: nmethod (%p), method (%s), call level (%d)", nm, nm->method()->name_and_sig_as_C_string(), call_level); + hot_nm = nm; + } + + assert(hot_nm != nullptr, "unable to relocate callees"); + + if (call_level < HotCodeCallLevel) { + // Loop over relocations to relocate callees + RelocIterator relocIter(hot_nm); + while (relocIter.next()) { + // Check if this is a call + Relocation* reloc = relocIter.reloc(); + if (!reloc->is_call()) { + continue; + } + + // Find the call destination address + address dest = ((CallRelocation*) reloc)->destination(); + + // Recursively relocate callees + num_relocated += do_relocation(dest, call_level + 1); + } + } + + return num_relocated; +} + +void HotCodeCollector::unregister_nmethod(nmethod* nm) { + assert_lock_strong(CodeCache_lock); + if (!_is_initialized) { + return; + } + + if (!nm->is_compiled_by_c2()) { + return; + } + + if (CodeCache::get_code_blob_type(nm) == CodeBlobType::MethodHot) { + // Nmethods in the hot code heap do not count towards total C2 nmethods. + return; + } + + // CodeCache_lock is held, so we can safely decrement the count. + _total_c2_nmethods_count--; +} + +void HotCodeCollector::register_nmethod(nmethod* nm) { + assert_lock_strong(CodeCache_lock); + if (!_is_initialized) { + return; + } + + if (!nm->is_compiled_by_c2()) { + return; // Only C2 nmethods are relocated to HotCodeHeap. 
+ } + + if (CodeCache::get_code_blob_type(nm) == CodeBlobType::MethodHot) { + // Nmethods in the hot code heap do not count towards total C2 nmethods. + return; + } + + // CodeCache_lock is held, so we can safely increment the count. + _new_c2_nmethods_count++; + _total_c2_nmethods_count++; +} +#endif // COMPILER2 diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSetPreselector.hpp b/src/hotspot/share/runtime/hotCodeCollector.hpp similarity index 57% rename from src/hotspot/share/gc/shenandoah/shenandoahCollectionSetPreselector.hpp rename to src/hotspot/share/runtime/hotCodeCollector.hpp index b78259dd85b..dbefa3dc788 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSetPreselector.hpp +++ b/src/hotspot/share/runtime/hotCodeCollector.hpp @@ -22,30 +22,35 @@ * */ -#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSETPRESELECTOR_HPP -#define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSETPRESELECTOR_HPP +#ifdef COMPILER2 +#ifndef SHARE_RUNTIME_HOTCODECOLLECTOR_HPP +#define SHARE_RUNTIME_HOTCODECOLLECTOR_HPP -#include "gc/shenandoah/shenandoahCollectionSet.hpp" -#include "memory/resourceArea.hpp" +#include "runtime/javaThread.hpp" -class ShenandoahCollectionSetPreselector : public StackObj { - ShenandoahCollectionSet* _cset; - bool* _pset; - ResourceMark _rm; +class Candidates; -public: - ShenandoahCollectionSetPreselector(ShenandoahCollectionSet* cset, size_t num_regions): - _cset(cset) { - _pset = NEW_RESOURCE_ARRAY(bool, num_regions); - for (unsigned int i = 0; i < num_regions; i++) { - _pset[i] = false; - } - _cset->establish_preselected(_pset); - } +class HotCodeCollector : public JavaThread { + private: + static bool _is_initialized; - ~ShenandoahCollectionSetPreselector() { - _cset->abandon_preselected(); - } + static int _new_c2_nmethods_count; + static int _total_c2_nmethods_count; + + HotCodeCollector(); + + static void do_grouping(Candidates& candidates); + + static int do_relocation(void* candidate, uint call_level); + + public: + 
static void initialize(); + static void thread_entry(JavaThread* thread, TRAPS); + static void unregister_nmethod(nmethod* nm); + static void register_nmethod(nmethod* nm); + + static bool is_nmethod_count_stable(); }; -#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSETPRESELECTOR_HPP +#endif // SHARE_RUNTIME_HOTCODECOLLECTOR_HPP +#endif // COMPILER2 diff --git a/src/hotspot/share/runtime/hotCodeSampler.cpp b/src/hotspot/share/runtime/hotCodeSampler.cpp new file mode 100644 index 00000000000..730a47d238a --- /dev/null +++ b/src/hotspot/share/runtime/hotCodeSampler.cpp @@ -0,0 +1,121 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifdef COMPILER2 + +#include "code/codeCache.hpp" +#include "logging/log.hpp" +#include "runtime/hotCodeSampler.hpp" +#include "runtime/javaThread.inline.hpp" + +void ThreadSampler::sample_all_java_threads() { + // Collect samples for each JavaThread + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { + if (jt->is_hidden_from_external_view() || + jt->in_deopt_handler() || + (jt->thread_state() != _thread_in_native && jt->thread_state() != _thread_in_Java)) { + continue; + } + + GetPCTask task(jt); + task.run(); + address pc = task.pc(); + if (pc == nullptr) { + continue; + } + + if (CodeCache::contains(pc)) { + nmethod* nm = CodeCache::find_blob(pc)->as_nmethod_or_null(); + if (nm != nullptr) { + bool created = false; + int *count = _samples.put_if_absent(nm, 0, &created); + (*count)++; + if (created) { + _samples.maybe_grow(); + } + } + } + } +} + +Candidates::Candidates(ThreadSampler& sampler) + : _hot_sample_count(0), _non_profiled_sample_count(0) { + auto func = [&](nmethod* nm, int count) { + if (CodeCache::get_code_blob_type(nm) == CodeBlobType::MethodNonProfiled) { + _candidates.append(Pair(nm, count)); + add_non_profiled_sample_count(count); + } else if (CodeCache::get_code_blob_type(nm) == CodeBlobType::MethodHot) { + add_hot_sample_count(count); + } + }; + sampler.iterate_samples(func); + + log_info(hotcode)("Generated candidate list from %d samples corresponding to %d nmethods", _non_profiled_sample_count + _hot_sample_count, _candidates.length()); +} + +void Candidates::add_candidate(nmethod* nm, int count) { + _candidates.append(Pair(nm, count)); +} + +void Candidates::add_hot_sample_count(int count) { + _hot_sample_count += count; +} + +void Candidates::add_non_profiled_sample_count(int count) { + _non_profiled_sample_count += count; +} + +void Candidates::sort() { + _candidates.sort( + [](Pair* a, Pair* b) { + if (a->second > b->second) return 1; + if (a->second < b->second) return -1; + return 0; + } + ); +} + 
+bool Candidates::has_candidates() { + return !_candidates.is_empty(); +} + +nmethod* Candidates::get_candidate() { + assert(has_candidates(), "must not be empty"); + Pair candidate = _candidates.pop(); + + _hot_sample_count += candidate.second; + _non_profiled_sample_count -= candidate.second; + + return candidate.first; +} + +double Candidates::get_hot_sample_percent() { + if (_hot_sample_count + _non_profiled_sample_count == 0) { + return 0; + } + + return 100.0 * _hot_sample_count / (_hot_sample_count + _non_profiled_sample_count); +} + +#endif // COMPILER2 diff --git a/src/hotspot/share/runtime/hotCodeSampler.hpp b/src/hotspot/share/runtime/hotCodeSampler.hpp new file mode 100644 index 00000000000..d61cac791e1 --- /dev/null +++ b/src/hotspot/share/runtime/hotCodeSampler.hpp @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifdef COMPILER2 +#ifndef SHARE_RUNTIME_HOTCODESAMPLER_HPP +#define SHARE_RUNTIME_HOTCODESAMPLER_HPP + +#include "runtime/javaThread.hpp" +#include "runtime/suspendedThreadTask.hpp" +#include "runtime/threadSMR.hpp" +#include "utilities/pair.hpp" +#include "utilities/resizableHashTable.hpp" + +// Generate a random sampling period between min and max +static inline uint rand_sampling_period_ms() { + assert(HotCodeMaxSamplingMs >= HotCodeMinSamplingMs, "max cannot be smaller than min"); + julong range = (julong)HotCodeMaxSamplingMs - (julong)HotCodeMinSamplingMs + 1; + return (uint)(os::random() % range) + HotCodeMinSamplingMs; +} + +class ThreadSampler; + +class Candidates : public StackObj { + private: + GrowableArray> _candidates; + int _hot_sample_count; + int _non_profiled_sample_count; + + public: + Candidates(ThreadSampler& sampler); + + void add_candidate(nmethod* nm, int count); + void add_hot_sample_count(int count); + void add_non_profiled_sample_count(int count); + void sort(); + + bool has_candidates(); + nmethod* get_candidate(); + double get_hot_sample_percent(); +}; + +class GetPCTask : public SuspendedThreadTask { + private: + address _pc; + + void do_task(const SuspendedThreadTaskContext& context) override { + JavaThread* jt = JavaThread::cast(context.thread()); + if (jt->thread_state() != _thread_in_native && jt->thread_state() != _thread_in_Java) { + return; + } + _pc = os::fetch_frame_from_context(context.ucontext(), nullptr, nullptr); + } + + public: + GetPCTask(JavaThread* thread) : SuspendedThreadTask(thread), _pc(nullptr) {} + + address pc() const { + return _pc; + } +}; + +class ThreadSampler : public StackObj { + private: + static const int INITIAL_TABLE_SIZE = 109; + + // Table of nmethods found during profiling with sample count + ResizeableHashTable _samples; + + public: + ThreadSampler() : _samples(INITIAL_TABLE_SIZE, HotCodeSampleSeconds * 1000 / HotCodeMaxSamplingMs) {} + + // Iterate over and sample all Java threads + 
void sample_all_java_threads(); + + // Iterate over all samples with a callback function + template + void iterate_samples(Function func) { + _samples.iterate_all(func); + } +}; + +#endif // SHARE_RUNTIME_HOTCODESAMPLER_HPP +#endif // COMPILER2 diff --git a/src/hotspot/share/runtime/icache.hpp b/src/hotspot/share/runtime/icache.hpp index bc153862323..692a876d9a6 100644 --- a/src/hotspot/share/runtime/icache.hpp +++ b/src/hotspot/share/runtime/icache.hpp @@ -129,4 +129,27 @@ class ICacheStubGenerator : public StubCodeGenerator { void generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub); }; +class DefaultICacheInvalidationContext : StackObj { + public: + NONCOPYABLE(DefaultICacheInvalidationContext); + + DefaultICacheInvalidationContext() {} + + ~DefaultICacheInvalidationContext() {} + + void set_has_modified_code() {} +}; + +#ifndef PD_ICACHE_INVALIDATION_CONTEXT +#define PD_ICACHE_INVALIDATION_CONTEXT DefaultICacheInvalidationContext +#endif // PD_ICACHE_INVALIDATION_CONTEXT + +class ICacheInvalidationContext final : public PD_ICACHE_INVALIDATION_CONTEXT { + private: + NONCOPYABLE(ICacheInvalidationContext); + + public: + using PD_ICACHE_INVALIDATION_CONTEXT::PD_ICACHE_INVALIDATION_CONTEXT; +}; + #endif // SHARE_RUNTIME_ICACHE_HPP diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp index adc49f84358..d820968495e 100644 --- a/src/hotspot/share/runtime/init.cpp +++ b/src/hotspot/share/runtime/init.cpp @@ -70,6 +70,10 @@ void VM_Version_init(); void icache_init2(); void initialize_stub_info(); // must precede all blob/stub generation void preuniverse_stubs_init(); + +#if INCLUDE_CDS +void stubs_AOTAddressTable_init(); +#endif // INCLUDE_CDS void initial_stubs_init(); jint universe_init(); // depends on codeCache_init and preuniverse_stubs_init @@ -149,13 +153,19 @@ jint init_globals() { AOTCodeCache::init2(); // depends on universe_init, must be before initial_stubs_init AsyncLogWriter::initialize(); +#if INCLUDE_CDS + 
stubs_AOTAddressTable_init(); // publish external addresses used by stubs + // depends on AOTCodeCache::init2 +#endif // INCLUDE_CDS initial_stubs_init(); // stubgen initial stub routines // stack overflow exception blob is referenced by the interpreter - AOTCodeCache::init_early_stubs_table(); // need this after stubgen initial stubs and before shared runtime initial stubs SharedRuntime::generate_initial_stubs(); gc_barrier_stubs_init(); // depends on universe_init, must be before interpreter_init continuations_init(); // must precede continuation stub generation - continuation_stubs_init(); // depends on continuations_init + AOTCodeCache::init3(); // depends on stubs_AOTAddressTable_init + // and continuations_init and must + // precede continuation stub generation + continuation_stubs_init(); // depends on continuations_init and AOTCodeCache::init3 #if INCLUDE_JFR SharedRuntime::generate_jfr_stubs(); #endif @@ -164,7 +174,6 @@ jint init_globals() { InterfaceSupport_init(); VMRegImpl::set_regName(); // need this before generate_stubs (for printing oop maps). SharedRuntime::generate_stubs(); - AOTCodeCache::init_shared_blobs_table(); // need this after generate_stubs SharedRuntime::init_adapter_library(); // do this after AOTCodeCache::init_shared_blobs_table return JNI_OK; } diff --git a/src/hotspot/share/runtime/interfaceSupport.cpp b/src/hotspot/share/runtime/interfaceSupport.cpp index 11a7d9fd41f..6ccf63b4c5e 100644 --- a/src/hotspot/share/runtime/interfaceSupport.cpp +++ b/src/hotspot/share/runtime/interfaceSupport.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "gc/shared/collectedHeap.inline.hpp" +#include "interpreter/interpreterRuntime.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" @@ -65,6 +66,10 @@ VMEntryWrapper::~VMEntryWrapper() { if (VerifyStack) { InterfaceSupport::verify_stack(); } + // Verify interpreter oopmap generation + if (GenerateOopMapALot) { + InterpreterRuntime::generate_oop_map_alot(); + } } VMNativeEntryWrapper::VMNativeEntryWrapper() { diff --git a/src/hotspot/share/runtime/lockStack.inline.hpp b/src/hotspot/share/runtime/lockStack.inline.hpp index 27eb07fcec8..a9ad3553db8 100644 --- a/src/hotspot/share/runtime/lockStack.inline.hpp +++ b/src/hotspot/share/runtime/lockStack.inline.hpp @@ -1,7 +1,7 @@ /* * Copyright (c) 2022, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -254,7 +254,7 @@ inline void OMCache::set_monitor(ObjectMonitor *monitor) { oop obj = monitor->object_peek(); assert(obj != nullptr, "must be alive"); - assert(monitor == ObjectSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table"); + assert(monitor == ObjectSynchronizer::get_monitor_from_table(obj), "must exist in table"); OMCacheEntry to_insert = {obj, monitor}; diff --git a/src/hotspot/share/runtime/mutex.cpp b/src/hotspot/share/runtime/mutex.cpp index c455b5f36a2..8e0c1d10e5c 100644 --- a/src/hotspot/share/runtime/mutex.cpp +++ b/src/hotspot/share/runtime/mutex.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -293,7 +293,8 @@ Mutex::Mutex(Rank rank, const char * name, bool allow_vm_block) : _owner(nullptr _rank = rank; _skip_rank_check = false; - assert(_rank >= static_cast(0) && _rank <= safepoint, "Bad lock rank %s: %s", rank_name(), name); + assert(_rank >= static_cast(0) && _rank <= safepoint, "Bad lock rank %d outside [0, %d]: %s", + static_cast(rank), static_cast(safepoint), name); // The allow_vm_block also includes allowing other non-Java threads to block or // allowing Java threads to block in native. @@ -324,25 +325,33 @@ static const char* _rank_names[] = { "event", "service", "stackwatermark", "tty" static const int _num_ranks = 7; -static const char* rank_name_internal(Mutex::Rank r) { +static void print_rank_name_internal(outputStream* st, Mutex::Rank r) { // Find closest rank and print out the name - stringStream st; for (int i = 0; i < _num_ranks; i++) { if (r == _ranks[i]) { - return _rank_names[i]; + st->print("%s", _rank_names[i]); } else if (r > _ranks[i] && (i < _num_ranks-1 && r < _ranks[i+1])) { int delta = static_cast(_ranks[i+1]) - static_cast(r); - st.print("%s-%d", _rank_names[i+1], delta); - return st.as_string(); + st->print("%s-%d", _rank_names[i+1], delta); } } - return "fail"; +} + +// Requires caller to have ResourceMark. +static const char* rank_name_internal(Mutex::Rank r) { + stringStream st; + print_rank_name_internal(&st, r); + return st.as_string(); } const char* Mutex::rank_name() const { return rank_name_internal(_rank); } +// Does not require caller to have ResourceMark. 
+void Mutex::print_rank_name(outputStream* st) const { + print_rank_name_internal(st, _rank); +} void Mutex::assert_no_overlap(Rank orig, Rank adjusted, int adjust) { int i = 0; @@ -364,7 +373,8 @@ void Mutex::print_on(outputStream* st) const { if (_allow_vm_block) { st->print("%s", " allow_vm_block"); } - DEBUG_ONLY(st->print(" %s", rank_name())); + st->print(" "); + DEBUG_ONLY(print_rank_name(st)); st->cr(); } diff --git a/src/hotspot/share/runtime/mutex.hpp b/src/hotspot/share/runtime/mutex.hpp index cf2b222d2da..4d30a320cbf 100644 --- a/src/hotspot/share/runtime/mutex.hpp +++ b/src/hotspot/share/runtime/mutex.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,9 +130,11 @@ class Mutex : public CHeapObj { return _skip_rank_check; } + const char* rank_name() const; + void print_rank_name(outputStream* st) const; + public: Rank rank() const { return _rank; } - const char* rank_name() const; Mutex* next() const { return _next; } #endif // ASSERT diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index b102e6424f1..3dd61374d03 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -105,7 +105,6 @@ Mutex* G1MarkStackFreeList_lock = nullptr; Monitor* G1OldGCCount_lock = nullptr; Mutex* G1OldSets_lock = nullptr; Mutex* G1ReviseYoungLength_lock = nullptr; -Monitor* G1RootRegionScan_lock = nullptr; Mutex* G1RareEvent_lock = nullptr; Mutex* G1Uncommit_lock = nullptr; #endif @@ -216,7 +215,6 @@ void mutex_init() { MUTEX_DEFN(G1MarkStackChunkList_lock , PaddedMutex , nosafepoint); MUTEX_DEFN(G1MarkStackFreeList_lock , PaddedMutex , nosafepoint); MUTEX_DEFN(G1OldSets_lock , PaddedMutex , nosafepoint); - 
MUTEX_DEFN(G1RootRegionScan_lock , PaddedMonitor, nosafepoint-1); MUTEX_DEFN(G1Uncommit_lock , PaddedMutex , service-2); } #endif diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index f6c0a967718..682383de401 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -101,7 +101,6 @@ extern Monitor* G1OldGCCount_lock; // in support of "concurrent" f extern Mutex* G1OldSets_lock; // protects the G1 old region sets extern Mutex* G1RareEvent_lock; // Synchronizes (rare) parallel GC operations. extern Mutex* G1ReviseYoungLength_lock; // Protects access to young gen length revising operations. -extern Monitor* G1RootRegionScan_lock; // used to notify that the G1 CM threads have finished scanning the root regions extern Mutex* G1Uncommit_lock; // protects the G1 uncommit list when not at safepoints #endif diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index 8c6994c2152..9fc834f4b6b 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -155,7 +155,7 @@ static const jlong MAX_RECHECK_INTERVAL = 1000; // // Succession is provided for by a policy of competitive handoff. // The exiting thread does _not_ grant or pass ownership to the -// successor thread. (This is also referred to as "handoff succession"). +// successor thread. (This is also referred to as "handoff succession"). // Instead the exiting thread releases ownership and possibly wakes // a successor, so the successor can (re)compete for ownership of the lock. // @@ -189,7 +189,7 @@ static const jlong MAX_RECHECK_INTERVAL = 1000; // // Once we have formed a doubly linked list it's easy to find the // successor (A), wake it up, have it remove itself, and update the -// tail pointer, as seen in and 3) below. +// tail pointer, see 3) below. 
// // 3) entry_list ->F<=>E<=>D<=>C<=>B->null // entry_list_tail ------------------^ @@ -223,7 +223,7 @@ static const jlong MAX_RECHECK_INTERVAL = 1000; // remove itself) or update the tail. // // * The monitor entry list operations avoid locks, but strictly speaking -// they're not lock-free. Enter is lock-free, exit is not. +// they're not lock-free. Enter is lock-free, exit is not. // For a description of 'Methods and apparatus providing non-blocking access // to a resource,' see U.S. Pat. No. 7844973. // @@ -387,7 +387,7 @@ bool ObjectMonitor::try_lock_with_contention_mark(JavaThread* locking_thread, Ob prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread); if (prev_owner == DEFLATER_MARKER) { // We successfully cancelled the in-progress async deflation by - // changing owner from DEFLATER_MARKER to current. We now extend + // changing owner from DEFLATER_MARKER to current. We now extend // the lifetime of the contention_mark (e.g. contentions++) here // to prevent the deflater thread from winning the last part of // the 2-part async deflation protocol after the regular @@ -593,12 +593,15 @@ void ObjectMonitor::enter_with_contention_mark(JavaThread* current, ObjectMonito OSThreadContendState osts(current->osthread()); assert(current->thread_state() == _thread_in_vm, "invariant"); + ObjectWaiter node(current); for (;;) { ExitOnSuspend eos(this); { ThreadBlockInVMPreprocess tbivs(current, eos, true /* allow_suspend */); - enter_internal(current); + if (!try_enter_fast(current, &node)) { + enter_internal(current, &node, false /* reenter_path */); + } current->set_current_pending_monitor(nullptr); // We can go to a safepoint at the end of this block. If we // do a thread dump during that safepoint, then this thread will show @@ -630,14 +633,14 @@ void ObjectMonitor::enter_with_contention_mark(JavaThread* current, ObjectMonito // The thread -- now the owner -- is back in vm mode. // Report the glorious news via TI,DTrace and jvmstat. 
- // The probe effect is non-trivial. All the reportage occurs + // The probe effect is non-trivial. All the reportage occurs // while we hold the monitor, increasing the length of the critical - // section. Amdahl's parallel speedup law comes vividly into play. + // section. Amdahl's parallel speedup law comes vividly into play. // // Another option might be to aggregate the events (thread local or // per-monitor aggregation) and defer reporting until a more opportune // time -- such as next time some thread encounters contention but has - // yet to acquire the lock. While spinning that thread could + // yet to acquire the lock. While spinning that thread could // spinning we could increment JVMStat counters, etc. DTRACE_MONITOR_PROBE(contended__entered, this, object(), current); @@ -736,11 +739,11 @@ bool ObjectMonitor::try_lock_or_add_to_entry_list(JavaThread* current, ObjectWai return false; } - // Interference - the CAS failed because _entry_list changed. Before + // Interference - the CAS failed because _entry_list changed. Before // retrying the CAS retry taking the lock as it may now be free. if (try_lock(current) == TryLockResult::Success) { - assert(!has_successor(current), "invariant"); assert(has_owner(current), "invariant"); + assert(!has_successor(current), "invariant"); node->TState = ObjectWaiter::TS_RUN; return true; } @@ -854,7 +857,7 @@ bool ObjectMonitor::deflate_monitor(Thread* current) { } if (UseObjectMonitorTable) { - ObjectSynchronizer::deflate_monitor(current, obj, this); + ObjectSynchronizer::deflate_monitor(obj, this); } else if (obj != nullptr) { // Install the old mark word if nobody else has already done it. 
install_displaced_markword_in_object(obj); @@ -942,14 +945,17 @@ const char* ObjectMonitor::is_busy_to_string(stringStream* ss) { return ss->base(); } -void ObjectMonitor::enter_internal(JavaThread* current) { +bool ObjectMonitor::try_enter_fast(JavaThread* current, ObjectWaiter* current_node) { + assert(current != nullptr, "invariant"); assert(current->thread_state() == _thread_blocked, "invariant"); + assert(current_node != nullptr, "invariant"); + assert(current_node->_thread == current, "invariant"); // Try the lock - TATAS if (try_lock(current) == TryLockResult::Success) { - assert(!has_successor(current), "invariant"); assert(has_owner(current), "invariant"); - return; + assert(!has_successor(current), "invariant"); + return true; } assert(InitDone, "Unexpectedly not initialized"); @@ -958,69 +964,82 @@ void ObjectMonitor::enter_internal(JavaThread* current) { // // If the _owner is ready but OFFPROC we could use a YieldTo() // operation to donate the remainder of this thread's quantum - // to the owner. This has subtle but beneficial affinity + // to the owner. This has subtle but beneficial affinity // effects. if (try_spin(current)) { assert(has_owner(current), "invariant"); assert(!has_successor(current), "invariant"); - return; + return true; } // The Spin failed -- Enqueue and park the thread ... - assert(!has_successor(current), "invariant"); assert(!has_owner(current), "invariant"); + assert(!has_successor(current), "invariant"); // Enqueue "current" on ObjectMonitor's _entry_list. // - // Node acts as a proxy for current. + // current_node acts as a proxy for current. // As an aside, if were to ever rewrite the synchronization code mostly // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class - // Java objects. This would avoid awkward lifecycle and liveness issues, + // Java objects. This would avoid awkward lifecycle and liveness issues, // as well as eliminate a subset of ABA issues. 
// TODO: eliminate ObjectWaiter and enqueue either Threads or Events. - ObjectWaiter node(current); current->_ParkEvent->reset(); - if (try_lock_or_add_to_entry_list(current, &node)) { - return; // We got the lock. + if (try_lock_or_add_to_entry_list(current, current_node)) { + return true; // We got the lock. } + // This thread is now added to the _entry_list. // The lock might have been released while this thread was occupied queueing - // itself onto _entry_list. To close the race and avoid "stranding" and - // progress-liveness failure we must resample-retry _owner before parking. + // itself onto _entry_list. To close the race and avoid "stranding" and + // progress-liveness failure the caller must resample-retry _owner before parking. // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner. - // In this case the ST-MEMBAR is accomplished with CAS(). - // - // TODO: Defer all thread state transitions until park-time. - // Since state transitions are heavy and inefficient we'd like - // to defer the state transitions until absolutely necessary, - // and in doing so avoid some transitions ... + // In this case the ST-MEMBAR is accomplished with CAS() in try_lock_or_add_to_entry_list. + return false; +} + +void ObjectMonitor::enter_internal(JavaThread* current, ObjectWaiter* current_node, bool reenter_path) { + assert(current != nullptr, "invariant"); + assert(current->thread_state() == _thread_blocked, "invariant"); + assert(current_node != nullptr, "invariant"); + assert(current_node->_thread == current, "invariant"); // If there are unmounted virtual threads ahead in the _entry_list we want // to do a timed-park instead to alleviate some deadlock cases where one // of them is picked as the successor but cannot run due to having run out // of carriers. 
This can happen, for example, if this is a pinned virtual - // thread currently loading or initializining a class, and all other carriers + // thread currently loading or initializing a class, and all other carriers // have a pinned vthread waiting for said class to be loaded/initialized. // Read counter *after* adding this thread to the _entry_list. Adding to // _entry_list uses Atomic::cmpxchg() which already provides a fence that - // prevents this load from floating up previous store. + // prevents this load from floating up past a previous store. // Note that we can have false positives where timed-park is not necessary. - bool do_timed_parked = has_unmounted_vthreads(); + bool do_timed_park = has_unmounted_vthreads(); jlong recheck_interval = 1; for (;;) { + ObjectWaiter::TStates v = current_node->TState; + guarantee(v == ObjectWaiter::TS_ENTER, "invariant"); if (try_lock(current) == TryLockResult::Success) { break; } assert(!has_owner(current), "invariant"); + if (reenter_path) { + // If try_lock failed, spin again - we expect the notifier to release the monitor quickly. + // Note that spin count may be zero so the above try_lock is necessary. + if (try_spin(current)) { + break; + } + } + // park self - if (do_timed_parked) { + if (do_timed_park) { current->_ParkEvent->park(recheck_interval); // Increase the recheck_interval, but clamp the value. recheck_interval *= 8; @@ -1031,55 +1050,67 @@ void ObjectMonitor::enter_internal(JavaThread* current) { current->_ParkEvent->park(); } + // Try again, but just so we distinguish between futile wakeups and + // successful wakeups. The following test isn't algorithmically + // necessary, but it helps us maintain sensible statistics. if (try_lock(current) == TryLockResult::Success) { break; } - // The lock is still contested. + // The lock is still contended. - // Assuming this is not a spurious wakeup we'll normally find _succ == current. 
- // We can defer clearing _succ until after the spin completes - // try_spin() must tolerate being called with _succ == current. - // Try yet another round of adaptive spinning. - if (try_spin(current)) { - break; + if (!reenter_path) { + // Assuming this is not a spurious wakeup we'll normally find _succ == current. + // We can defer clearing _succ until after the spin completes and + // try_spin() must tolerate being called with _succ == current. + // Try yet another round of adaptive spinning. + if (try_spin(current)) { + break; + } } // We can find that we were unpark()ed and redesignated _succ while - // we were spinning. That's harmless. If we iterate and call park(), + // we were spinning. That's harmless. If we iterate and call park(), // park() will consume the event and return immediately and we'll - // just spin again. This pattern can repeat, leaving _succ to simply + // just spin again. This pattern can repeat, leaving _succ to simply // spin on a CPU. - if (has_successor(current)) clear_successor(); + if (has_successor(current)) { + clear_successor(); + } // Invariant: after clearing _succ a thread *must* retry _owner before parking. OrderAccess::fence(); + + // Will only potentially change on the reenter path - see comment in notify_internal. + do_timed_park |= current_node->_do_timed_park; } + assert(has_owner(current), "invariant"); + // Egress : // Current has acquired the lock -- Unlink current from the _entry_list. - unlink_after_acquire(current, &node); + unlink_after_acquire(current, current_node); if (has_successor(current)) { - clear_successor(); // Note that we don't need to do OrderAccess::fence() after clearing // _succ here, since we own the lock. + clear_successor(); } // We've acquired ownership with CAS(). // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics. // But since the CAS() this thread may have also stored into _succ - // or entry_list. These meta-data updates must be visible __before + // or entry_list. 
These meta-data updates must be visible __before // this thread subsequently drops the lock. // Consider what could occur if we didn't enforce this constraint -- // STs to monitor meta-data and user-data could reorder with (become // visible after) the ST in exit that drops ownership of the lock. // Some other thread could then acquire the lock, but observe inconsistent - // or old monitor meta-data and heap data. That violates the JMM. + // or old monitor meta-data and heap data. That violates the JMM. // To that end, the exit() operation must have at least STST|LDST - // "release" barrier semantics. Specifically, there must be at least a + // "release" barrier semantics. Specifically, there must be at least a // STST|LDST barrier in exit() before the ST of null into _owner that drops - // the lock. The barrier ensures that changes to monitor meta-data and data + // the lock. The barrier ensures that changes to monitor meta-data and data // protected by the lock will be visible before we release the lock, and // therefore before some other thread (CPU) has a chance to acquire the lock. // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html. @@ -1088,98 +1119,10 @@ void ObjectMonitor::enter_internal(JavaThread* current) { // the ST of null into _owner in the *subsequent* (following) corresponding // monitorexit. + current_node->TState = ObjectWaiter::TS_RUN; return; } -// reenter_internal() is a specialized inline form of the latter half of the -// contended slow-path from enter_internal(). We use reenter_internal() only for -// monitor reentry in wait(). -// -// In the future we should reconcile enter_internal() and reenter_internal(). 
- -void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) { - assert(current != nullptr, "invariant"); - assert(current->thread_state() == _thread_blocked, "invariant"); - assert(currentNode != nullptr, "invariant"); - assert(currentNode->_thread == current, "invariant"); - assert(_waiters > 0, "invariant"); - - // If there are unmounted virtual threads ahead in the _entry_list we want - // to do a timed-park instead to alleviate some deadlock cases where one - // of them is picked as the successor but cannot run due to having run out - // of carriers. This can happen, for example, if a mixed of unmounted and - // pinned vthreads taking up all the carriers are waiting for a class to be - // initialized, and the selected successor is one of the unmounted vthreads. - // Although this method is used for the "notification" case, it could be - // that this thread reached here without been added to the _entry_list yet. - // This can happen if it was interrupted or the wait timed-out at the same - // time. In that case we rely on currentNode->_do_timed_park, which will be - // read on the next loop iteration, after consuming the park permit set by - // the notifier in notify_internal. - // Note that we can have false positives where timed-park is not necessary. - bool do_timed_parked = has_unmounted_vthreads(); - jlong recheck_interval = 1; - - for (;;) { - ObjectWaiter::TStates v = currentNode->TState; - guarantee(v == ObjectWaiter::TS_ENTER, "invariant"); - assert(!has_owner(current), "invariant"); - - // This thread has been notified so try to reacquire the lock. - if (try_lock(current) == TryLockResult::Success) { - break; - } - - // If that fails, spin again. Note that spin count may be zero so the above TryLock - // is necessary. 
- if (try_spin(current)) { - break; - } - - { - OSThreadContendState osts(current->osthread()); - if (do_timed_parked) { - current->_ParkEvent->park(recheck_interval); - // Increase the recheck_interval, but clamp the value. - recheck_interval *= 8; - if (recheck_interval > MAX_RECHECK_INTERVAL) { - recheck_interval = MAX_RECHECK_INTERVAL; - } - } else { - current->_ParkEvent->park(); - } - } - - // Try again, but just so we distinguish between futile wakeups and - // successful wakeups. The following test isn't algorithmically - // necessary, but it helps us maintain sensible statistics. - if (try_lock(current) == TryLockResult::Success) { - break; - } - - // The lock is still contested. - - // Assuming this is not a spurious wakeup we'll normally - // find that _succ == current. - if (has_successor(current)) clear_successor(); - - // Invariant: after clearing _succ a contending thread - // *must* retry _owner before parking. - OrderAccess::fence(); - - // See comment in notify_internal - do_timed_parked |= currentNode->_do_timed_park; - } - - // Current has acquired the lock -- Unlink current from the _entry_list. - assert(has_owner(current), "invariant"); - unlink_after_acquire(current, currentNode); - if (has_successor(current)) clear_successor(); - assert(!has_successor(current), "invariant"); - currentNode->TState = ObjectWaiter::TS_RUN; - OrderAccess::fence(); // see comments at the end of enter_internal() -} - // This method is called from two places: // - On monitorenter contention with a null waiter. // - After Object.wait() times out or the target is interrupted to reenter the @@ -1203,7 +1146,9 @@ bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* wai if (try_lock_or_add_to_entry_list(current, node)) { // We got the lock. 
- if (waiter == nullptr) delete node; // for Object.wait() don't delete yet + if (waiter == nullptr) { + delete node; // for Object.wait() don't delete yet + } dec_unmounted_vthreads(); return true; } @@ -1214,8 +1159,14 @@ bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* wai if (try_lock(current) == TryLockResult::Success) { assert(has_owner(current), "invariant"); unlink_after_acquire(current, node); - if (has_successor(current)) clear_successor(); - if (waiter == nullptr) delete node; // for Object.wait() don't delete yet + if (has_successor(current)) { + // Note that we don't need to do OrderAccess::fence() after clearing + // _succ here, since we own the lock. + clear_successor(); + } + if (waiter == nullptr) { + delete node; // for Object.wait() don't delete yet + } dec_unmounted_vthreads(); return true; } @@ -1239,7 +1190,9 @@ bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, Co if (node->is_wait() && !node->at_reenter()) { bool acquired_monitor = vthread_wait_reenter(current, node, cont); - if (acquired_monitor) return true; + if (acquired_monitor) { + return true; + } } // Retry acquiring monitor... @@ -1253,7 +1206,9 @@ bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, Co } oop vthread = current->vthread(); - if (has_successor(current)) clear_successor(); + if (has_successor(current)) { + clear_successor(); + } // Invariant: after clearing _succ a thread *must* retry acquiring the monitor. OrderAccess::fence(); @@ -1274,7 +1229,11 @@ void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) { add_to_contentions(-1); dec_unmounted_vthreads(); - if (has_successor(current)) clear_successor(); + if (has_successor(current)) { + // Note that we don't need to do OrderAccess::fence() after clearing + // _succ here, since we own the lock. 
+ clear_successor(); + } guarantee(_recursions == 0, "invariant"); @@ -1476,7 +1435,7 @@ void ObjectMonitor::unlink_after_acquire(JavaThread* current, ObjectWaiter* curr // inopportune) reclamation of "this". // // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ; -// There's one exception to the claim above, however. enter_internal() can call +// There's one exception to the claim above, however. enter_internal() can call // exit() to drop a lock if the acquirer has been externally suspended. // In that case exit() is called with _thread_state == _thread_blocked, // but the monitor's _contentions field is > 0, which inhibits reclamation. @@ -1564,12 +1523,12 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { w = entry_list_tail(current); // I'd like to write: guarantee (w->_thread != current). // But in practice an exiting thread may find itself on the entry_list. - // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and - // then calls exit(). Exit release the lock by setting O._owner to null. - // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The + // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and + // then calls exit(). Exit releases the lock by setting O._owner to null. + // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The // notify() operation moves T1 from O's waitset to O's entry_list. T2 then - // release the lock "O". T1 resumes immediately after the ST of null into - // _owner, above. T1 notices that the entry_list is populated, so it + // releases the lock "O". T1 resumes immediately after the ST of null into + // _owner, above. T1 notices that the entry_list is populated, so it // reacquires the lock and then finds itself on the entry_list. // Given all that, we have to tolerate the circumstance where "w" is // associated with current. 
@@ -1591,26 +1550,26 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { // Normally the exiting thread is responsible for ensuring succession, // but if this thread observes other successors are ready or other // entering threads are spinning after it has stored null into _owner - // then it can exit without waking a successor. The existence of + // then it can exit without waking a successor. The existence of // spinners or ready successors guarantees proper succession (liveness). - // Responsibility passes to the ready or running successors. The exiting - // thread delegates the duty. More precisely, if a successor already + // Responsibility passes to the ready or running successors. The exiting + // thread delegates the duty. More precisely, if a successor already // exists this thread is absolved of the responsibility of waking // (unparking) one. // The _succ variable is critical to reducing futile wakeup frequency. // _succ identifies the "heir presumptive" thread that has been made - // ready (unparked) but that has not yet run. We need only one such + // ready (unparked) but that has not yet run. We need only one such // successor thread to guarantee progress. // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf // section 3.3 "Futile Wakeup Throttling" for details. // - // Note that spinners in Enter() also set _succ non-null. - // In the current implementation spinners opportunistically set + // Note that spinners in enter(), try_enter_fast() and enter_internal() also + // set _succ non-null. In the current implementation spinners opportunistically set // _succ so that exiting threads might avoid waking a successor. // Which means that the exiting thread could exit immediately without // waking a successor, if it observes a successor after it has dropped - // the lock. Note that the dropped lock needs to become visible to the + // the lock. Note that the dropped lock needs to become visible to the // spinner. 
if (_entry_list == nullptr || has_successor()) { @@ -1787,7 +1746,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { if (JvmtiExport::should_post_monitor_wait()) { JvmtiExport::post_monitor_wait(current, object(), millis); } - // post monitor waited event. Note that this is past-tense, we are done waiting. + // post monitor waited event. Note that this is past-tense, we are done waiting. if (JvmtiExport::should_post_monitor_waited()) { // Note: 'false' parameter is passed here because the // wait was not timed out due to thread interrupt. @@ -1848,9 +1807,9 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // Enter the waiting queue, which is a circular doubly linked list in this case // but it could be a priority queue or any data structure. - // _wait_set_lock protects the wait queue. Normally the wait queue is accessed only + // _wait_set_lock protects the wait queue. Normally the wait queue is accessed only // by the owner of the monitor *except* in the case where park() - // returns because of a timeout of interrupt. Contention is exceptionally rare + // returns because of a timeout of interrupt. Contention is exceptionally rare // so we use a simple spin-lock instead of a heavier-weight blocking lock. { @@ -1907,7 +1866,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // written by the is thread. (perhaps the fetch might even be satisfied // by a look-aside into the processor's own store buffer, although given // the length of the code path between the prior ST and this load that's - // highly unlikely). If the following LD fetches a stale TS_WAIT value + // highly unlikely). If the following LD fetches a stale TS_WAIT value // then we'll acquire the lock and then re-fetch a fresh TState value. // That is, we fail toward safety. @@ -1925,7 +1884,12 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // No other threads will asynchronously modify TState. 
guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant"); OrderAccess::loadload(); - if (has_successor(current)) clear_successor(); + if (has_successor(current)) { + clear_successor(); + // Note that we do not need a fence here, as, regardless of the path taken, + // there is a fence either in ThreadBlockInVM's destructor or + // right after a call to post_monitor_wait_event(). + } // Reentry phase -- reacquire the monitor. // re-enter contended monitor after object.wait(). @@ -1971,7 +1935,9 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { ExitOnSuspend eos(this); { ThreadBlockInVMPreprocess tbivs(current, eos, true /* allow_suspend */); - reenter_internal(current, &node); + assert( _waiters > 0, "invariant"); + OSThreadContendState osts(current->osthread()); + enter_internal(current, &node, true /* reenter_path */); // We can go to a safepoint at the end of this block. If we // do a thread dump during that safepoint, then this thread will show // as having "-locked" the monitor, but the OS and java.lang.Thread @@ -2082,12 +2048,12 @@ bool ObjectMonitor::notify_internal(JavaThread* current) { // Wake up the thread to alleviate some deadlock cases where the successor // that will be picked up when this thread releases the monitor is an unmounted // virtual thread that cannot run due to having run out of carriers. Upon waking - // up, the thread will call reenter_internal() which will use timed-park in case + // up, the thread will call enter_internal(..., true) which will use timed-park in case // there is contention and there are still vthreads in the _entry_list. // If the target was interrupted or the wait timed-out at the same time, it could - // have reached reenter_internal and read a false value of has_unmounted_vthreads() + // have reached enter_internal and read a false value of has_unmounted_vthreads() // before we added it to the _entry_list above. 
To deal with that case, we set _do_timed_park - // which will be read by the target on the next loop iteration in reenter_internal. + // which will be read by the target on the next loop iteration in enter_internal. iterator->_do_timed_park = true; JavaThread* t = iterator->thread(); t->_ParkEvent->unpark(); @@ -2101,11 +2067,11 @@ bool ObjectMonitor::notify_internal(JavaThread* current) { } } - // _wait_set_lock protects the wait queue, not the entry_list. We could + // _wait_set_lock protects the wait queue, not the entry_list. We could // move the add-to-entry_list operation, above, outside the critical section - // protected by _wait_set_lock. In practice that's not useful. With the + // protected by _wait_set_lock. In practice that's not useful. With the // exception of wait() timeouts and interrupts the monitor owner - // is the only thread that grabs _wait_set_lock. There's almost no contention + // is the only thread that grabs _wait_set_lock. There's almost no contention // on _wait_set_lock so it's not profitable to reduce the length of the // critical section. } @@ -2206,9 +2172,9 @@ void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interru // Enter the waiting queue, which is a circular doubly linked list in this case // but it could be a priority queue or any data structure. - // _wait_set_lock protects the wait queue. Normally the wait queue is accessed only + // _wait_set_lock protects the wait queue. Normally the wait queue is accessed only // by the owner of the monitor *except* in the case where park() - // returns because of a timeout or interrupt. Contention is exceptionally rare + // returns because of a timeout or interrupt. Contention is exceptionally rare // so we use a simple spin-lock instead of a heavier-weight blocking lock. { @@ -2298,25 +2264,25 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node // algorithm. 
// // Broadly, we can fix the spin frequency -- that is, the % of contended lock -// acquisition attempts where we opt to spin -- at 100% and vary the spin count +// acquisition attempts where we opt to spin -- at 100% and vary the spin count // (duration) or we can fix the count at approximately the duration of -// a context switch and vary the frequency. Of course we could also +// a context switch and vary the frequency. Of course we could also // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor. // For a description of 'Adaptive spin-then-block mutual exclusion in // multi-threaded processing,' see U.S. Pat. No. 8046758. // // This implementation varies the duration "D", where D varies with // the success rate of recent spin attempts. (D is capped at approximately -// length of a round-trip context switch). The success rate for recent +// length of a round-trip context switch). The success rate for recent // spin attempts is a good predictor of the success rate of future spin -// attempts. The mechanism adapts automatically to varying critical +// attempts. The mechanism adapts automatically to varying critical // section length (lock modality), system load and degree of parallelism. // D is maintained per-monitor in _SpinDuration and is initialized -// optimistically. Spin frequency is fixed at 100%. +// optimistically. Spin frequency is fixed at 100%. // // Note that _SpinDuration is volatile, but we update it without locks -// or atomics. The code is designed so that _SpinDuration stays within -// a reasonable range even in the presence of races. The arithmetic +// or atomics. The code is designed so that _SpinDuration stays within +// a reasonable range even in the presence of races. The arithmetic // operations on _SpinDuration are closed over the domain of legal values, // so at worst a race will install and older but still legal value. // At the very worst this introduces some apparent non-determinism. 
@@ -2324,28 +2290,28 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node // count are relatively short, even in the worst case, the effect is harmless. // // Care must be taken that a low "D" value does not become an -// an absorbing state. Transient spinning failures -- when spinning +// an absorbing state. Transient spinning failures -- when spinning // is overall profitable -- should not cause the system to converge -// on low "D" values. We want spinning to be stable and predictable +// on low "D" values. We want spinning to be stable and predictable // and fairly responsive to change and at the same time we don't want // it to oscillate, become metastable, be "too" non-deterministic, // or converge on or enter undesirable stable absorbing states. // // We implement a feedback-based control system -- using past behavior -// to predict future behavior. We face two issues: (a) if the +// to predict future behavior. We face two issues: (a) if the // input signal is random then the spin predictor won't provide optimal // results, and (b) if the signal frequency is too high then the control // system, which has some natural response lag, will "chase" the signal. -// (b) can arise from multimodal lock hold times. Transient preemption +// (b) can arise from multimodal lock hold times. Transient preemption // can also result in apparent bimodal lock hold times. // Although sub-optimal, neither condition is particularly harmful, as // in the worst-case we'll spin when we shouldn't or vice-versa. // The maximum spin duration is rather short so the failure modes aren't bad. // To be conservative, I've tuned the gain in system to bias toward -// _not spinning. Relatedly, the system can sometimes enter a mode where it -// "rings" or oscillates between spinning and not spinning. This happens +// _not spinning. Relatedly, the system can sometimes enter a mode where it +// "rings" or oscillates between spinning and not spinning. 
This happens // when spinning is just on the cusp of profitability, however, so the -// situation is not dire. The state is benign -- there's no need to add +// situation is not dire. The state is benign -- there's no need to add // hysteresis control to damp the transition rate between spinning and // not spinning. @@ -2377,7 +2343,9 @@ inline static int adjust_down(int spin_duration) { // Consider an AIMD scheme like: x -= (x >> 3) + 100 // This is globally sample and tends to damp the response. x -= Knob_Penalty; - if (x < 0) { x = 0; } + if (x < 0) { + x = 0; + } return x; } else { return spin_duration; @@ -2403,7 +2371,7 @@ bool ObjectMonitor::short_fixed_spin(JavaThread* current, int spin_count, bool a // Spinning: Fixed frequency (100%), vary duration bool ObjectMonitor::try_spin(JavaThread* current) { - // Dumb, brutal spin. Good for comparative measurements against adaptive spinning. + // Dumb, brutal spin. Good for comparative measurements against adaptive spinning. int knob_fixed_spin = Knob_FixedSpin; // 0 (don't spin: default), 2000 good test if (knob_fixed_spin > 0) { return short_fixed_spin(current, knob_fixed_spin, false); @@ -2412,7 +2380,7 @@ bool ObjectMonitor::try_spin(JavaThread* current) { // Admission control - verify preconditions for spinning // // We always spin a little bit, just to prevent _SpinDuration == 0 from - // becoming an absorbing state. Put another way, we spin briefly to + // becoming an absorbing state. Put another way, we spin briefly to // sample, just in case the system load, parallelism, contention, or lock // modality changed. @@ -2424,7 +2392,7 @@ bool ObjectMonitor::try_spin(JavaThread* current) { // // Consider the following alternative: // Periodically set _SpinDuration = _SpinLimit and try a long/full - // spin attempt. "Periodically" might mean after a tally of + // spin attempt. "Periodically" might mean after a tally of // the # of failed spin attempts (or iterations) reaches some threshold. 
// This takes us into the realm of 1-out-of-N spinning, where we // hold the duration constant but vary the frequency. @@ -2471,9 +2439,9 @@ bool ObjectMonitor::try_spin(JavaThread* current) { // If this thread observes the monitor transition or flicker // from locked to unlocked to locked, then the odds that this // thread will acquire the lock in this spin attempt go down - // considerably. The same argument applies if the CAS fails + // considerably. The same argument applies if the CAS fails // or if we observe _owner change from one non-null value to - // another non-null value. In such cases we might abort + // another non-null value. In such cases we might abort // the spin without prejudice or apply a "penalty" to the // spin count-down variable "ctr", reducing it by 100, say. @@ -2484,6 +2452,8 @@ bool ObjectMonitor::try_spin(JavaThread* current) { // The CAS succeeded -- this thread acquired ownership // Take care of some bookkeeping to exit spin state. if (has_successor(current)) { + // Note that we don't need to do OrderAccess::fence() after clearing + // _succ here, since we own the lock. clear_successor(); } @@ -2525,7 +2495,7 @@ bool ObjectMonitor::try_spin(JavaThread* current) { if (has_successor(current)) { clear_successor(); // Invariant: after setting succ=null a contending thread - // must recheck-retry _owner before parking. This usually happens + // must recheck-retry _owner before parking. This usually happens // in the normal usage of try_spin(), but it's safest // to make try_spin() as foolproof as possible. OrderAccess::fence(); @@ -2541,19 +2511,19 @@ bool ObjectMonitor::try_spin(JavaThread* current) { // ----------------------------------------------------------------------------- // wait_set management ... 
-ObjectWaiter::ObjectWaiter(JavaThread* current) { - _next = nullptr; - _prev = nullptr; - _thread = current; - _monitor = nullptr; - _notifier_tid = 0; - _recursions = 0; - TState = TS_RUN; - _is_wait = false; - _at_reenter = false; - _interrupted = false; - _do_timed_park = false; - _active = false; +ObjectWaiter::ObjectWaiter(JavaThread* current) + : _next(nullptr), + _prev(nullptr), + _thread(current), + _monitor(nullptr), + _notifier_tid(0), + _recursions(0), + TState(TS_RUN), + _is_wait(false), + _at_reenter(false), + _interrupted(false), + _do_timed_park(false), + _active(false) { } const char* ObjectWaiter::getTStateName(ObjectWaiter::TStates state) { diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp index 8d9a481bf37..3ab7b8ea519 100644 --- a/src/hotspot/share/runtime/objectMonitor.hpp +++ b/src/hotspot/share/runtime/objectMonitor.hpp @@ -89,10 +89,8 @@ class ObjectWaiter : public CHeapObj { } }; -// The ObjectMonitor class implements the heavyweight version of a -// JavaMonitor. The lightweight BasicLock/stack lock version has been -// inflated into an ObjectMonitor. This inflation is typically due to -// contention or use of Object.wait(). +// The ObjectMonitor class implements the heavyweight version of a JavaMonitor. +// Inflation into an ObjectMonitor is typically due to contention or use of Object.wait(). // // WARNING: This is a very sensitive and fragile class. DO NOT make any // changes unless you are fully aware of the underlying semantics. 
@@ -394,8 +392,8 @@ class ObjectMonitor : public CHeapObj { bool notify_internal(JavaThread* current); ObjectWaiter* dequeue_waiter(); void dequeue_specific_waiter(ObjectWaiter* waiter); - void enter_internal(JavaThread* current); - void reenter_internal(JavaThread* current, ObjectWaiter* current_node); + void enter_internal(JavaThread* current, ObjectWaiter* current_node, bool reenter_path); + bool try_enter_fast(JavaThread* current, ObjectWaiter* current_node); void entry_list_build_dll(JavaThread* current); void unlink_after_acquire(JavaThread* current, ObjectWaiter* current_node); ObjectWaiter* entry_list_tail(JavaThread* current); diff --git a/src/hotspot/share/runtime/objectMonitorTable.cpp b/src/hotspot/share/runtime/objectMonitorTable.cpp index 767f5de6897..bc173992d7a 100644 --- a/src/hotspot/share/runtime/objectMonitorTable.cpp +++ b/src/hotspot/share/runtime/objectMonitorTable.cpp @@ -31,6 +31,7 @@ #include "runtime/thread.hpp" #include "runtime/timerTrace.hpp" #include "runtime/trimNativeHeap.hpp" +#include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" // ----------------------------------------------------------------------------- @@ -46,7 +47,7 @@ // // When you want to find a monitor associated with an object, you extract the // hash value of the object. Then calculate an index by taking the hash value -// and bit-wise AND it with the capacity mask (e.g. size-1) of the OMT. Now +// and bit-wise AND it with the capacity mask (i.e., size-1) of the OMT. Now // use that index into the OMT's array of pointers. If the pointer is non // null, check if it's a monitor pointer that is associated with the object. // If so you're done. If the pointer is non null, but associated with another @@ -55,7 +56,7 @@ // means that the monitor is simply not in the OMT. 
// // If the size of the pointer array is significantly larger than the number of -// pointers in it, the chance of finding the monitor in the hash index +// pointers in it, the chance of finding the monitor at the hash index // (without any further linear searching) is quite high. It is also straight // forward to generate C2 code for this, which for the fast path doesn't // contain any branching at all. See: C2_MacroAssembler::fast_lock(). @@ -68,11 +69,11 @@ // allocate a new table (twice as large as the old one), and then copy all the // old monitor pointers from the old table to the new. // -// But since the OMT is a concurrent hash table and things needs to work for -// other clients of the OMT while we grow it, it's gets a bit more +// But since the OMT is a concurrent hash table and things need to work for +// other clients of the OMT while we grow it, it gets a bit more // complicated. // -// Both the new and (potentially several) old table(s) may exist at the same +// The new and (potentially several) old table(s) may exist at the same // time. The newest is always called the "current", and the older ones are // singly linked using a "prev" pointer. // @@ -82,7 +83,8 @@ // // After that we start to go through all the indexes in the old table. If the // index is empty (the pointer is null) we put a "tombstone" into that index, -// which will prevent any future concurrent insert ending up in that index. +// which will prevent any future concurrent insert from ending up in that +// index. // // If the index contains a monitor pointer, we insert that monitor pointer // into the OMT which can be considered as one generation newer. If the index @@ -92,11 +94,11 @@ // that is not null, not a tombstone and not removed, is considered to be a // pointer to a monitor. // -// When all the monitor pointers from an old OMT has been transferred to the +// When all the monitor pointers from an old OMT have been transferred to the // new OMT, the old table is unlinked. 
// // This copying from an old OMT to one generation newer OMT, will continue -// until all the monitor pointers from old OMTs has been transferred to the +// until all the monitor pointers from old OMTs have been transferred to the // newest "current" OMT. // // The memory for old, unlinked OMTs will be freed after a thread-local @@ -116,17 +118,17 @@ // requirements. Don't change it for fun, it might backfire. // ----------------------------------------------------------------------------- -ObjectMonitorTable::Table* volatile ObjectMonitorTable::_curr; +Atomic ObjectMonitorTable::_curr; class ObjectMonitorTable::Table : public CHeapObj { friend class ObjectMonitorTable; DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0); const size_t _capacity_mask; // One less than its power-of-two capacity - Table* volatile _prev; // Set while rehashing - Entry volatile* _buckets; // The payload + Atomic _prev; // Set while growing/rebuilding + Atomic* _buckets; // The payload DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(_capacity_mask) + sizeof(_prev) + sizeof(_buckets)); - volatile size_t _items_count; + Atomic _items_count; DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(_items_count)); static Entry as_entry(ObjectMonitor* monitor) { @@ -155,11 +157,11 @@ class ObjectMonitorTable::Table : public CHeapObj { // Make sure we leave space for previous versions to relocate too. 
bool try_inc_items_count() { for (;;) { - size_t population = AtomicAccess::load(&_items_count); + size_t population = _items_count.load_relaxed(); if (should_grow(population)) { return false; } - if (AtomicAccess::cmpxchg(&_items_count, population, population + 1, memory_order_relaxed) == population) { + if (_items_count.compare_set(population, population + 1, memory_order_relaxed)) { return true; } } @@ -170,31 +172,31 @@ class ObjectMonitorTable::Table : public CHeapObj { } void inc_items_count() { - AtomicAccess::inc(&_items_count, memory_order_relaxed); + _items_count.add_then_fetch(1u, memory_order_relaxed); } void dec_items_count() { - AtomicAccess::dec(&_items_count, memory_order_relaxed); + _items_count.sub_then_fetch(1u, memory_order_relaxed); } public: Table(size_t capacity, Table* prev) : _capacity_mask(capacity - 1), _prev(prev), - _buckets(NEW_C_HEAP_ARRAY(Entry, capacity, mtObjectMonitor)), + _buckets(NEW_C_HEAP_ARRAY(Atomic, capacity, mtObjectMonitor)), _items_count(0) { for (size_t i = 0; i < capacity; ++i) { - _buckets[i] = empty(); + ::new (_buckets + i) Atomic(empty()); } } ~Table() { - FREE_C_HEAP_ARRAY(Entry, _buckets); + FREE_C_HEAP_ARRAY(Atomic, _buckets); } Table* prev() { - return AtomicAccess::load(&_prev); + return _prev.load_relaxed(); } size_t capacity() { @@ -206,12 +208,12 @@ public: } bool should_grow() { - return should_grow(AtomicAccess::load(&_items_count)); + return should_grow(_items_count.load_relaxed()); } size_t total_items() { - size_t current_items = AtomicAccess::load(&_items_count); - Table* prev = AtomicAccess::load(&_prev); + size_t current_items = _items_count.load_relaxed(); + Table* prev = _prev.load_relaxed(); if (prev != nullptr) { return prev->total_items() + current_items; } @@ -220,7 +222,7 @@ public: ObjectMonitor* get(oop obj, intptr_t hash) { // Acquire tombstones and relocations in case prev transitioned to null - Table* prev = AtomicAccess::load_acquire(&_prev); + Table* prev = _prev.load_acquire(); if 
(prev != nullptr) { ObjectMonitor* result = prev->get(obj, hash); if (result != nullptr) { @@ -232,8 +234,8 @@ public: size_t index = start_index; for (;;) { - Entry volatile* bucket = _buckets + index; - Entry entry = AtomicAccess::load_acquire(bucket); + Atomic& bucket = _buckets[index]; + Entry entry = bucket.load_acquire(); if (entry == tombstone() || entry == empty()) { // Not found @@ -249,14 +251,14 @@ public: assert(index != start_index, "invariant"); } - // Rehashing could have started by now, but if a monitor has been inserted in a - // newer table, it was inserted after the get linearization point. + // Rebuilding could have started by now, but if a monitor has been inserted + // in a newer table, it was inserted after the get linearization point. return nullptr; } ObjectMonitor* prepare_insert(oop obj, intptr_t hash) { - // Acquire any tomb stones and relocations if prev transitioned to null. - Table* prev = AtomicAccess::load_acquire(&_prev); + // Acquire any tombstones and relocations if prev transitioned to null. + Table* prev = _prev.load_acquire(); if (prev != nullptr) { ObjectMonitor* result = prev->prepare_insert(obj, hash); if (result != nullptr) { @@ -268,15 +270,15 @@ public: size_t index = start_index; for (;;) { - Entry volatile* bucket = _buckets + index; - Entry entry = AtomicAccess::load_acquire(bucket); + Atomic& bucket = _buckets[index]; + Entry entry = bucket.load_acquire(); if (entry == empty()) { // Found an empty slot to install the new monitor in. - // To avoid concurrent inserts succeeding, place a tomb stone here. - Entry result = AtomicAccess::cmpxchg(bucket, entry, tombstone(), memory_order_relaxed); + // To avoid concurrent inserts succeeding, place a tombstone here. + Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_relaxed); if (result == entry) { - // Success! Nobody will try to insert here again, except reinsert from rehashing. + // Success! 
Nobody will try to insert here again, except reinsert from rebuilding. return nullptr; } entry = result; @@ -299,7 +301,7 @@ public: ObjectMonitor* get_set(oop obj, Entry new_monitor, intptr_t hash) { // Acquire any tombstones and relocations if prev transitioned to null. - Table* prev = AtomicAccess::load_acquire(&_prev); + Table* prev = _prev.load_acquire(); if (prev != nullptr) { // Sprinkle tombstones in previous tables to force concurrent inserters // to the latest table. We only really want to try inserting in the @@ -314,14 +316,14 @@ public: size_t index = start_index; for (;;) { - Entry volatile* bucket = _buckets + index; - Entry entry = AtomicAccess::load_acquire(bucket); + Atomic& bucket = _buckets[index]; + Entry entry = bucket.load_acquire(); if (entry == empty()) { // Empty slot to install the new monitor if (try_inc_items_count()) { // Succeeding in claiming an item. - Entry result = AtomicAccess::cmpxchg(bucket, entry, new_monitor, memory_order_acq_rel); + Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel); if (result == entry) { // Success - already incremented. return as_monitor(new_monitor); @@ -331,11 +333,11 @@ public: dec_items_count(); entry = result; } else { - // Out of allowance; leaving place for rehashing to succeed. + // Out of allowance; leave space for rebuilding to succeed. // To avoid concurrent inserts succeeding, place a tombstone here. - Entry result = AtomicAccess::cmpxchg(bucket, entry, tombstone(), memory_order_acq_rel); + Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_acq_rel); if (result == entry) { - // Success; nobody will try to insert here again, except reinsert from rehashing. + // Success; nobody will try to insert here again, except reinsert from rebuilding. 
return nullptr; } entry = result; @@ -365,8 +367,8 @@ public: size_t index = start_index; for (;;) { - Entry volatile* bucket = _buckets + index; - Entry entry = AtomicAccess::load_acquire(bucket); + Atomic& bucket = _buckets[index]; + Entry entry = bucket.load_acquire(); if (entry == empty()) { // The monitor does not exist in this table. @@ -380,8 +382,8 @@ public: if (entry == old_monitor) { // Found matching entry; remove it - Entry result = AtomicAccess::cmpxchg(bucket, entry, removed(), memory_order_relaxed); - assert(result == entry, "should not fail"); + bool result = bucket.compare_set(entry, removed(), memory_order_relaxed); + assert(result, "should not fail"); break; } @@ -394,8 +396,8 @@ public: // still not being a monitor, instead of flickering back to being there. // Only the deflation thread rebuilds and unlinks tables, so we do not need // any concurrency safe prev read below. - if (_prev != nullptr) { - _prev->remove(obj, old_monitor, hash); + if (_prev.load_relaxed() != nullptr) { + _prev.load_relaxed()->remove(obj, old_monitor, hash); } } @@ -406,12 +408,12 @@ public: size_t index = start_index; for (;;) { - Entry volatile* bucket = _buckets + index; - Entry entry = AtomicAccess::load_acquire(bucket); + Atomic& bucket = _buckets[index]; + Entry entry = bucket.load_acquire(); if (entry == empty()) { // Empty slot to install the new monitor. - Entry result = AtomicAccess::cmpxchg(bucket, entry, new_monitor, memory_order_acq_rel); + Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel); if (result == entry) { // Success - unconditionally increment. inc_items_count(); @@ -425,7 +427,7 @@ public: if (entry == tombstone()) { // A concurrent inserter did not get enough allowance in the table. // But reinsert always succeeds - we will take the spot. 
- Entry result = AtomicAccess::cmpxchg(bucket, entry, new_monitor, memory_order_acq_rel); + Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel); if (result == entry) { // Success - unconditionally increment. inc_items_count(); @@ -438,7 +440,7 @@ public: if (entry == removed()) { // A removed entry can be flipped back with reinsertion. - Entry result = AtomicAccess::cmpxchg(bucket, entry, new_monitor, memory_order_release); + Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_release); if (result == entry) { // Success - but don't increment; the initial entry did that for us. return; @@ -458,7 +460,7 @@ public: } void rebuild() { - Table* prev = _prev; + Table* prev = _prev.load_relaxed(); if (prev == nullptr) { // Base case for recursion - no previous version. return; @@ -476,12 +478,12 @@ public: ThreadBlockInVM tbivm(current); } - Entry volatile* bucket = prev->_buckets + index; - Entry entry = AtomicAccess::load_acquire(bucket); + Atomic& bucket = prev->_buckets[index]; + Entry entry = bucket.load_acquire(); if (entry == empty()) { // Empty slot; put a tombstone there. - Entry result = AtomicAccess::cmpxchg(bucket, entry, tombstone(), memory_order_acq_rel); + Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_acq_rel); if (result == empty()) { // Success; move to next entry. continue; @@ -507,17 +509,17 @@ public: } // Unlink this table, releasing the tombstones and relocations. 
- AtomicAccess::release_store(&_prev, (Table*)nullptr); + _prev.release_store(nullptr); } }; void ObjectMonitorTable::create() { - _curr = new Table(128, nullptr); + _curr.store_relaxed(new Table(128, nullptr)); } -ObjectMonitor* ObjectMonitorTable::monitor_get(Thread* current, oop obj) { +ObjectMonitor* ObjectMonitorTable::monitor_get(oop obj) { const intptr_t hash = obj->mark().hash(); - Table* curr = AtomicAccess::load_acquire(&_curr); + Table* curr = _curr.load_acquire(); ObjectMonitor* monitor = curr->get(obj, hash); return monitor; } @@ -525,7 +527,7 @@ ObjectMonitor* ObjectMonitorTable::monitor_get(Thread* current, oop obj) { // Returns a new table to try inserting into. ObjectMonitorTable::Table* ObjectMonitorTable::grow_table(Table* curr) { Table* result; - Table* new_table = AtomicAccess::load_acquire(&_curr); + Table* new_table = _curr.load_acquire(); if (new_table != curr) { // Table changed; no need to try further return new_table; @@ -536,14 +538,14 @@ ObjectMonitorTable::Table* ObjectMonitorTable::grow_table(Table* curr) { // attempt to allocate the new table. MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag); - new_table = AtomicAccess::load_acquire(&_curr); + new_table = _curr.load_acquire(); if (new_table != curr) { // Table changed; no need to try further return new_table; } new_table = new Table(curr->capacity() << 1, curr); - result = AtomicAccess::cmpxchg(&_curr, curr, new_table, memory_order_acq_rel); + result = _curr.compare_exchange(curr, new_table, memory_order_acq_rel); if (result == curr) { log_info(monitorinflation)("Growing object monitor table (capacity: %zu)", new_table->capacity()); @@ -556,15 +558,15 @@ ObjectMonitorTable::Table* ObjectMonitorTable::grow_table(Table* curr) { } } - // Somebody else started rehashing; restart in new table. + // Somebody else started rebuilding; restart in their new table. 
delete new_table; return result; } -ObjectMonitor* ObjectMonitorTable::monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) { +ObjectMonitor* ObjectMonitorTable::monitor_put_get(ObjectMonitor* monitor, oop obj) { const intptr_t hash = obj->mark().hash(); - Table* curr = AtomicAccess::load_acquire(&_curr); + Table* curr = _curr.load_acquire(); for (;;) { // Curr is the latest table and is reasonably loaded. @@ -577,19 +579,19 @@ ObjectMonitor* ObjectMonitorTable::monitor_put_get(Thread* current, ObjectMonito } } -void ObjectMonitorTable::remove_monitor_entry(Thread* current, ObjectMonitor* monitor) { +void ObjectMonitorTable::remove_monitor_entry(ObjectMonitor* monitor) { oop obj = monitor->object_peek(); if (obj == nullptr) { // Defer removal until subsequent rebuilding. return; } const intptr_t hash = obj->mark().hash(); - Table* curr = AtomicAccess::load_acquire(&_curr); + Table* curr = _curr.load_acquire(); curr->remove(obj, curr->as_entry(monitor), hash); - assert(monitor_get(current, obj) != monitor, "should have been removed"); + assert(monitor_get(obj) != monitor, "should have been removed"); } -// Before handshake; rehash and unlink tables. +// Before handshake; rebuild and unlink tables. void ObjectMonitorTable::rebuild(GrowableArray* delete_list) { Table* new_table; { @@ -619,15 +621,15 @@ void ObjectMonitorTable::rebuild(GrowableArray* delete_list) { // given the growing threshold of 12.5%, it is impossible for the // tables to reach a load factor above 50%. Which is more than // enough to guarantee the function of this concurrent hash table. - Table* curr = AtomicAccess::load_acquire(&_curr); + Table* curr = _curr.load_acquire(); size_t need_to_accomodate = curr->total_items(); size_t new_capacity = curr->should_grow(need_to_accomodate) ? 
curr->capacity() << 1 : curr->capacity(); new_table = new Table(new_capacity, curr); - Table* result = AtomicAccess::cmpxchg(&_curr, curr, new_table, memory_order_acq_rel); + Table* result = _curr.compare_exchange(curr, new_table, memory_order_acq_rel); if (result != curr) { - // Somebody else racingly started rehashing. Delete the + // Somebody else racingly started rebuilding. Delete the // new_table and treat somebody else's table as the new one. delete new_table; new_table = result; @@ -652,7 +654,7 @@ void ObjectMonitorTable::destroy(GrowableArray* delete_list) { } address ObjectMonitorTable::current_table_address() { - return (address)(&_curr); + return reinterpret_cast
(&_curr) + _curr.value_offset_in_bytes(); } ByteSize ObjectMonitorTable::table_capacity_mask_offset() { @@ -660,5 +662,9 @@ ByteSize ObjectMonitorTable::table_capacity_mask_offset() { } ByteSize ObjectMonitorTable::table_buckets_offset() { + // Assumptions made from the emitted code about the layout. + STATIC_ASSERT(sizeof(Atomic) == sizeof(Entry*)); + STATIC_ASSERT(Atomic::value_offset_in_bytes() == 0); + return byte_offset_of(Table, _buckets); } diff --git a/src/hotspot/share/runtime/objectMonitorTable.hpp b/src/hotspot/share/runtime/objectMonitorTable.hpp index e8e7c61c6fa..1ec372883b2 100644 --- a/src/hotspot/share/runtime/objectMonitorTable.hpp +++ b/src/hotspot/share/runtime/objectMonitorTable.hpp @@ -27,6 +27,7 @@ #include "memory/allStatic.hpp" #include "oops/oopsHierarchy.hpp" +#include "runtime/atomic.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/sizes.hpp" @@ -44,7 +45,7 @@ public: class Table; private: - static Table* volatile _curr; + static Atomic _curr; static Table* grow_table(Table* curr); enum class Entry : uintptr_t { @@ -61,11 +62,11 @@ public: } SpecialPointerValues; static void create(); - static ObjectMonitor* monitor_get(Thread* current, oop obj); - static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj); + static ObjectMonitor* monitor_get(oop obj); + static ObjectMonitor* monitor_put_get(ObjectMonitor* monitor, oop obj); static void rebuild(GrowableArray* delete_list); static void destroy(GrowableArray* delete_list); - static void remove_monitor_entry(Thread* current, ObjectMonitor* monitor); + static void remove_monitor_entry(ObjectMonitor* monitor); // Compiler support static address current_table_address(); diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 16335f97fdb..d55cf454256 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -1285,7 +1285,7 @@ void os::print_location(outputStream* st, intptr_t x, bool 
verbose) { bool accessible = is_readable_pointer(addr); // Check if addr points into the narrow Klass protection zone - if (UseCompressedClassPointers && CompressedKlassPointers::is_in_protection_zone(addr)) { + if (CompressedKlassPointers::is_in_protection_zone(addr)) { st->print_cr(PTR_FORMAT " points into nKlass protection zone", p2i(addr)); return; } @@ -1339,8 +1339,9 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) { } // Compressed klass needs to be decoded first. + // Todo: questionable for COH - can we do this better? #ifdef _LP64 - if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) { + if (((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) { narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr; Klass* k = CompressedKlassPointers::decode_without_asserts(narrow_klass); @@ -1912,17 +1913,7 @@ void os::trace_page_sizes_for_requested_size(const char* str, // as was done for logical processors here, or replicate and // specialize this method for each platform. (Or fix os to have // some inheritance structure and use subclassing. Sigh.) -// If you want some platform to always or never behave as a server -// class machine, change the setting of AlwaysActAsServerClassMachine -// and NeverActAsServerClassMachine in globals*.hpp. bool os::is_server_class_machine() { - // First check for the early returns - if (NeverActAsServerClassMachine) { - return false; - } - if (AlwaysActAsServerClassMachine) { - return true; - } // Then actually look at the machine bool result = false; const unsigned int server_processors = 2; @@ -2312,24 +2303,13 @@ void os::uncommit_memory(char* addr, size_t bytes, bool executable) { log_debug(os, map)("Uncommitted " RANGEFMT, RANGEFMTARGS(addr, bytes)); } -// The scope of NmtVirtualMemoryLocker covers both pd_release_memory and record_virtual_memory_release because -// these operations must happen atomically to avoid races causing NMT to fall out os sync with the OS reality. 
-// We do not have the same lock protection for pd_reserve_memory and record_virtual_memory_reserve. -// We assume that there is some external synchronization that prevents a region from being released -// before it is finished being reserved. +// pd_release_memory is called outside the protection of the NMT lock. +// Until pd_release_memory is called, The OS is unable to give away the about-to-be-released range to another thread. +// So there is no risk of another thread re-reserving the range before this function is done with it. void os::release_memory(char* addr, size_t bytes) { assert_nonempty_range(addr, bytes); - bool res; - if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; - res = pd_release_memory(addr, bytes); - if (res) { - MemTracker::record_virtual_memory_release(addr, bytes); - } - } else { - res = pd_release_memory(addr, bytes); - } - if (!res) { + MemTracker::record_virtual_memory_release(addr, bytes); + if (!pd_release_memory(addr, bytes)) { fatal("Failed to release " RANGEFMT, RANGEFMTARGS(addr, bytes)); } log_debug(os, map)("Released " RANGEFMT, RANGEFMTARGS(addr, bytes)); @@ -2402,17 +2382,8 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset, } void os::unmap_memory(char *addr, size_t bytes) { - bool result; - if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; - result = pd_unmap_memory(addr, bytes); - if (result) { - MemTracker::record_virtual_memory_release(addr, bytes); - } - } else { - result = pd_unmap_memory(addr, bytes); - } - if (!result) { + MemTracker::record_virtual_memory_release(addr, bytes); + if (!pd_unmap_memory(addr, bytes)) { fatal("Failed to unmap memory " RANGEFMT, RANGEFMTARGS(addr, bytes)); } } diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index e185188384f..c883b828456 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -454,7 +454,7 @@ class os: AllStatic { static size_t 
align_down_vm_page_size(size_t size) { return align_down(size, os::vm_page_size()); } // The set of page sizes which the VM is allowed to use (may be a subset of - // the page sizes actually available on the platform). + // the page sizes actually available on the platform). static const PageSizes& page_sizes() { return _page_sizes; } // Returns the page size to use for a region of memory. @@ -721,6 +721,8 @@ class os: AllStatic { static int open(const char *path, int oflag, int mode); static FILE* fdopen(int fd, const char* mode); static FILE* fopen(const char* path, const char* mode); + static int64_t ftell(FILE* file); + static int fseek(FILE* file, int64_t offset, int whence); static jlong lseek(int fd, jlong offset, int whence); static bool file_exists(const char* file); @@ -893,6 +895,9 @@ class os: AllStatic { static void print_date_and_time(outputStream* st, char* buf, size_t buflen); static void print_elapsed_time(outputStream* st, double time); + // Prints the number of open file descriptors for the current process + static void print_open_file_descriptors(outputStream* st); + static void print_user_info(outputStream* st); static void print_active_locale(outputStream* st); diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index ae3835dd344..352c90f913b 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -82,6 +82,7 @@ #include "utilities/copy.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" +#include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/hashTable.hpp" #include "utilities/macros.hpp" @@ -176,6 +177,11 @@ void SharedRuntime::generate_stubs() { CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception)); generate_deopt_blob(); + +#if INCLUDE_CDS + // disallow any further generation of runtime stubs + AOTCodeCache::set_shared_stubs_complete(); +#endif // 
INCLUDE_CDS } void SharedRuntime::init_adapter_library() { @@ -931,7 +937,7 @@ void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool de // bindings. current->clear_scopedValueBindings(); // Increment counter for hs_err file reporting - AtomicAccess::inc(&Exceptions::_stack_overflow_errors); + Exceptions::increment_stack_overflow_errors(); throw_and_post_jvmti_exception(current, exception); } diff --git a/src/hotspot/share/runtime/stubCodeGenerator.cpp b/src/hotspot/share/runtime/stubCodeGenerator.cpp index 43250c004ca..45e40f4a754 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.cpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.cpp @@ -23,6 +23,7 @@ */ #include "asm/macroAssembler.inline.hpp" +#include "code/aotCodeCache.hpp" #include "code/codeCache.hpp" #include "compiler/disassembler.hpp" #include "oops/oop.inline.hpp" @@ -30,7 +31,9 @@ #include "prims/jvmtiExport.hpp" #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" - +#if INCLUDE_ZGC +#include "gc/z/zBarrierSetAssembler.hpp" +#endif // INCLUDE_ZGC // Implementation of StubCodeDesc @@ -69,14 +72,16 @@ void StubCodeDesc::print() const { print_on(tty); } StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) { _masm = new MacroAssembler(code); _blob_id = BlobId::NO_BLOBID; + _stub_data = nullptr; _print_code = PrintStubCode || print_code; } -StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, BlobId blob_id, bool print_code) { +StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data, bool print_code) { assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id)); _masm = new MacroAssembler(code); _blob_id = blob_id; + _stub_data = stub_data; _print_code = PrintStubCode || print_code; } @@ -91,11 +96,92 @@ StubCodeGenerator::~StubCodeGenerator() { #endif } +void StubCodeGenerator::setup_code_desc(const char* name, address start, address end, bool loaded_from_cache) { 
+ StubCodeDesc* cdesc = new StubCodeDesc("StubRoutines", name, start, end); + cdesc->set_disp(uint(start - _masm->code_section()->outer()->insts_begin())); + if (loaded_from_cache) { + cdesc->set_loaded_from_cache(); + } + print_stub_code_desc(cdesc); + // copied from ~StubCodeMark() + Forte::register_stub(cdesc->name(), cdesc->begin(), cdesc->end()); + if (JvmtiExport::should_post_dynamic_code_generated()) { + JvmtiExport::post_dynamic_code_generated(cdesc->name(), cdesc->begin(), cdesc->end()); + } +} + +// Helper used to restore ranges and handler addresses restored from +// AOT cache. Expects entries to contain 3 * count addresses beginning +// at offset begin which identify start of range, end of range and +// address of handler pc. start and end of range may not be null. +// handler pc may be null in which case it defaults to the +// default_handler. + +void StubCodeGenerator::register_unsafe_access_handlers(GrowableArray
&entries, int begin, int count) { + for (int i = 0; i < count; i++) { + int offset = begin + 3 * i; + address start = entries.at(offset); + address end = entries.at(offset + 1); + address handler = entries.at(offset + 2); + assert(start != nullptr, "sanity"); + assert(end != nullptr, "sanity"); + if (handler == nullptr) { + assert(UnsafeMemoryAccess::common_exit_stub_pc() != nullptr, + "default unsafe handler must be set before registering unsafe region with no handler!"); + handler = UnsafeMemoryAccess::common_exit_stub_pc(); + } + UnsafeMemoryAccess::add_to_table(start, end, handler); + } +} + +// Helper used to retrieve ranges and handler addresses registered +// during generation of the stub which spans [start, end) in order to +// allow them to be saved to an AOT cache. +void StubCodeGenerator::retrieve_unsafe_access_handlers(address start, address end, GrowableArray
&entries) { + UnsafeMemoryAccess::collect_entries(start, end, entries); +} + +#if INCLUDE_ZGC +// Helper used to restore ZGC pointer colouring relocation addresses +// retrieved from the AOT cache. +void StubCodeGenerator::register_reloc_addresses(GrowableArray
&entries, int begin, int count) { + LogTarget(Trace, aot, codecache, stubs) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + for (int i = begin; i < count; i++) { + ls.print_cr("Registered reloc address " INTPTR_FORMAT, p2i(entries.at(i))); + } + } + ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + zbs->register_reloc_addresses(entries, begin, count); +} + +// Helper used to retrieve ranges and handler addresses registered +// during generation of the stub which spans [start, end) in order to +// allow them to be saved to an AOT cache. +void StubCodeGenerator::retrieve_reloc_addresses(address start, address end, GrowableArray
&entries) { + int l = entries.length(); + ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + zbs->retrieve_reloc_addresses(start, end, entries); + LogTarget(Trace, aot, codecache, stubs) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + for (int i = l; i < entries.length(); i++) { + ls.print_cr("retrieved reloc address " INTPTR_FORMAT, p2i(entries.at(i))); + } + } +} +#endif // INCLUDE_ZGC + void StubCodeGenerator::stub_prolog(StubCodeDesc* cdesc) { // default implementation - do nothing } void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) { + print_stub_code_desc(cdesc); +} + +void StubCodeGenerator::print_stub_code_desc(StubCodeDesc* cdesc) { LogTarget(Debug, stubs) lt; if (lt.is_enabled()) { LogStream ls(lt); @@ -119,6 +205,52 @@ void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) { } } +address StubCodeGenerator::load_archive_data(StubId stub_id, GrowableArray
*entries, GrowableArray
* extras) { + // punt to stub data if it exists and is not for dumping + if (_stub_data == nullptr || _stub_data->is_dumping()) { + return nullptr; + } + // punt to stub data + address start, end; + start = _stub_data->load_archive_data(stub_id, end, entries, extras); + + if (start != nullptr) { + setup_code_desc(StubInfo::name(stub_id), start, end, true); + } + + return start; +} + +void StubCodeGenerator::store_archive_data(StubId stub_id, address start, address end, GrowableArray
* entries, GrowableArray
* extras) { + // punt to stub data if we have any + if (_stub_data != nullptr) { + _stub_data->store_archive_data(stub_id, start, end, entries, extras); + } +} + +void StubCodeGenerator::print_statistics_on(outputStream* st) { + st->print_cr("StubRoutines Stubs:"); + st->print_cr(" Initial stubs: %d", StubInfo::stub_count(BlobId::stubgen_initial_id)); + st->print_cr(" Continuation stubs: %d", StubInfo::stub_count(BlobId::stubgen_continuation_id)); + st->print_cr(" Compiler stubs: %d", StubInfo::stub_count(BlobId::stubgen_compiler_id)); + st->print_cr(" Final stubs: %d", StubInfo::stub_count(BlobId::stubgen_final_id)); + + int emitted = 0; + int loaded_from_cache = 0; + + StubCodeDesc* scd = StubCodeDesc::first(); + while (scd != nullptr) { + if (!strcmp(scd->group(), "StubRoutines")) { + emitted += 1; + if (scd->loaded_from_cache()) { + loaded_from_cache += 1; + } + } + scd = StubCodeDesc::next(scd); + } + st->print_cr("Total stubroutines stubs emitted: %d (generated=%d, loaded from cache=%d)", emitted, emitted - loaded_from_cache, loaded_from_cache); +} + #ifdef ASSERT void StubCodeGenerator::verify_stub(StubId stub_id) { assert(StubRoutines::stub_to_blob(stub_id) == blob_id(), "wrong blob %s for generation of stub %s", StubRoutines::get_blob_name(blob_id()), StubRoutines::get_stub_name(stub_id)); diff --git a/src/hotspot/share/runtime/stubCodeGenerator.hpp b/src/hotspot/share/runtime/stubCodeGenerator.hpp index 7d8944c85ea..958fa76543b 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.hpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.hpp @@ -26,6 +26,7 @@ #define SHARE_RUNTIME_STUBCODEGENERATOR_HPP #include "asm/assembler.hpp" +#include "code/aotCodeCache.hpp" #include "memory/allocation.hpp" #include "runtime/stubInfo.hpp" @@ -48,6 +49,7 @@ class StubCodeDesc: public CHeapObj { address _begin; // points to the first byte of the stub code (included) address _end; // points to the first byte after the stub code (excluded) uint _disp; // Displacement 
relative base address in buffer. + bool _loaded_from_cache; friend class StubCodeMark; friend class StubCodeGenerator; @@ -65,6 +67,8 @@ class StubCodeDesc: public CHeapObj { void set_disp(uint disp) { _disp = disp; } + void set_loaded_from_cache() { _loaded_from_cache = true; } + public: static StubCodeDesc* first() { return _list; } static StubCodeDesc* next(StubCodeDesc* desc) { return desc->_next; } @@ -81,6 +85,7 @@ class StubCodeDesc: public CHeapObj { _end = end; _disp = 0; _list = this; + _loaded_from_cache = false; }; static void freeze(); @@ -93,12 +98,11 @@ class StubCodeDesc: public CHeapObj { uint disp() const { return _disp; } int size_in_bytes() const { return pointer_delta_as_int(_end, _begin); } bool contains(address pc) const { return _begin <= pc && pc < _end; } + bool loaded_from_cache() const { return _loaded_from_cache; } void print_on(outputStream* st) const; void print() const; }; -// forward declare blob and stub id enums - // The base class for all stub-generating code generators. // Provides utility functions. @@ -108,10 +112,20 @@ class StubCodeGenerator: public StackObj { BlobId _blob_id; protected: MacroAssembler* _masm; + AOTStubData* _stub_data; - public: + void setup_code_desc(const char* name, address start, address end, bool loaded_from_cache); + // unsafe handler management + void register_unsafe_access_handlers(GrowableArray
&entries, int begin, int count); + void retrieve_unsafe_access_handlers(address start, address end, GrowableArray
&entries); +#if INCLUDE_ZGC + void register_reloc_addresses(GrowableArray
&entries, int begin, int count); + void retrieve_reloc_addresses(address start, address end, GrowableArray
&entries); +#endif // INCLUDE_ZGC + +public: StubCodeGenerator(CodeBuffer* code, bool print_code = false); - StubCodeGenerator(CodeBuffer* code, BlobId blob_id, bool print_code = false); + StubCodeGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data = nullptr, bool print_code = false); ~StubCodeGenerator(); MacroAssembler* assembler() const { return _masm; } @@ -120,9 +134,59 @@ class StubCodeGenerator: public StackObj { virtual void stub_prolog(StubCodeDesc* cdesc); // called by StubCodeMark constructor virtual void stub_epilog(StubCodeDesc* cdesc); // called by StubCodeMark destructor + void print_stub_code_desc(StubCodeDesc* cdesc); + + static void print_statistics_on(outputStream* st); + + // load_archive_data should be called before generating the stub + // identified by stub_id. If AOT caching of stubs is enabled and the + // stub is found then the address of the stub's first and, possibly, + // only entry is returned and the caller should use it instead of + // generating the stub. Otherwise a null address is returned and the + // caller should proceed to generate the stub. + // + // store_archive_data should be called when a stub has been + // successfully generated into the current blob irrespective of + // whether the current JVM is generating or consuming an AOT archive + // (the caller should not check for either case). When generating an + // archive the stub entry and end addresses are recorded for storage + // along with the current blob and also so that references to the stub + // from other stubs or from compiled Java methods can be detected + // and marked as requiring relocation. When consuming an archive the + // stub entry address is still entered in order to identify it as a + // relocation target. When no archive is in use the call has no side effects. + // + // start and end identify the inclusive start and exclusive end + // address for stub code and must lie in the current blob's code + // range. 
Stubs presented via this interface must declare at least + // one entry and start is always taken to be the first entry. + // + // Optional arrays entries and extras store other addresses of + // interest all of which must either lie in the interval (start, + // end) or be nullptr (verified by load and store methods). + // + // entries lists secondary entries for the stub each of which must + // match a corresponding entry declaration for the stub (entry count + // verified by load and store methods). Null entry addresses are + // allowed when an architecture does not require a specific entry + // but may not vary from one run to the next. If the cache is in use + // at a store (for loading or saving code) then non-null entry + // addresses are entered into the AOT cache stub address table + // allowing references to them from other stubs or nmethods to be + // relocated. + // + // extras lists other non-entry stub addresses of interest such as + // memory protection ranges and associated handler addresses + // (potentially including a null address). These do not need to + // be declared as entries and their number and meaning may vary + // according to the architecture. + + address load_archive_data(StubId stub_id, GrowableArray
*entries = nullptr, GrowableArray
* extras = nullptr); + void store_archive_data(StubId stub_id, address start, address end, GrowableArray
*entries = nullptr, GrowableArray
* extras = nullptr); #ifdef ASSERT void verify_stub(StubId stub_id); #endif + }; // Stack-allocated helper class used to associate a stub code with a name. diff --git a/src/hotspot/share/runtime/stubDeclarations.hpp b/src/hotspot/share/runtime/stubDeclarations.hpp index c478eda3e7c..ed1b3ea2e78 100644 --- a/src/hotspot/share/runtime/stubDeclarations.hpp +++ b/src/hotspot/share/runtime/stubDeclarations.hpp @@ -539,18 +539,19 @@ // generated. // // Architecture-specific entries need to be declared using the -// do_arch_entry template +// do_arch_entry templates // // do_arch_entry(arch, blob_name, stub_name, field_name, getter_name) // // do_arch_entry_init(arch, blob_name, stub_name, field_name, // getter_name, init_function) // +// do_arch_entry_array(arch, blob_name, stub_name, field_name, +// getter_name, count) +// // The only difference between these templates and the generic ones is // that they receive an extra argument which identifies the current // architecture e.g. x86, aarch64 etc. -// -// Currently there is no support for a do_arch_array_entry template. 
// Include arch-specific stub and entry declarations and make sure the // relevant template macros have been defined @@ -598,7 +599,8 @@ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ do_blob(preuniverse) \ do_stub(preuniverse, fence) \ do_entry(preuniverse, fence, fence_entry, fence_entry) \ @@ -615,7 +617,8 @@ atomic_cmpxchg_long_entry) \ /* merge in stubs and entries declared in arch header */ \ STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ end_blob(preuniverse) \ #define STUBGEN_INITIAL_BLOBS_DO(do_blob, end_blob, \ @@ -623,7 +626,8 @@ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ do_blob(initial) \ do_stub(initial, call_stub) \ do_entry(initial, call_stub, call_stub_entry, call_stub_entry) \ @@ -667,19 +671,10 @@ do_entry(initial, dcbrt, dcbrt, dcbrt) \ do_stub(initial, fmod) \ do_entry(initial, fmod, fmod, fmod) \ - /* following generic entries should really be x86_32 only */ \ - do_stub(initial, dlibm_sin_cos_huge) \ - do_entry(initial, dlibm_sin_cos_huge, dlibm_sin_cos_huge, \ - dlibm_sin_cos_huge) \ - do_stub(initial, dlibm_reduce_pi04l) \ - do_entry(initial, dlibm_reduce_pi04l, dlibm_reduce_pi04l, \ - dlibm_reduce_pi04l) \ - do_stub(initial, dlibm_tan_cot_huge) \ - do_entry(initial, dlibm_tan_cot_huge, dlibm_tan_cot_huge, \ - dlibm_tan_cot_huge) \ /* merge in stubs and entries declared in arch header */ \ STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ end_blob(initial) \ @@ -689,7 +684,8 @@ do_entry_array, \ do_arch_blob, \ do_arch_entry, \ - do_arch_entry_init) \ + do_arch_entry_init, \ + 
do_arch_entry_array) \ do_blob(continuation) \ do_stub(continuation, cont_thaw) \ do_entry(continuation, cont_thaw, cont_thaw, cont_thaw) \ @@ -704,7 +700,8 @@ cont_returnBarrierExc) \ /* merge in stubs and entries declared in arch header */ \ STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ end_blob(continuation) \ @@ -713,7 +710,8 @@ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ do_blob(compiler) \ do_stub(compiler, array_sort) \ do_entry(compiler, array_sort, array_sort, select_arraysort_function) \ @@ -858,7 +856,8 @@ bigIntegerLeftShiftWorker, bigIntegerLeftShift) \ /* merge in stubs and entries declared in arch header */ \ STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ end_blob(compiler) \ @@ -867,7 +866,8 @@ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ do_blob(final) \ do_stub(final, verify_oop) \ do_entry(final, verify_oop, verify_oop_subroutine_entry, \ @@ -962,9 +962,15 @@ do_entry_init(final, arrayof_jlong_arraycopy, \ arrayof_jlong_arraycopy, arrayof_jlong_arraycopy, \ StubRoutines::arrayof_jlong_copy) \ + do_entry(final, arrayof_jlong_arraycopy, \ + arrayof_jlong_arraycopy_nopush, \ + arrayof_jlong_arraycopy_nopush) \ do_stub(final, arrayof_oop_arraycopy) \ do_entry_init(final, arrayof_oop_arraycopy, arrayof_oop_arraycopy, \ arrayof_oop_arraycopy, StubRoutines::arrayof_oop_copy) \ + do_entry(final, arrayof_oop_arraycopy, \ + arrayof_oop_arraycopy_nopush, \ + arrayof_oop_arraycopy_nopush) \ do_stub(final, arrayof_oop_arraycopy_uninit) \ do_entry_init(final, arrayof_oop_arraycopy_uninit, \ 
arrayof_oop_arraycopy_uninit, \ @@ -1073,7 +1079,8 @@ lookup_secondary_supers_table_slow_path_stub) \ /* merge in stubs and entries declared in arch header */ \ STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ end_blob(final) \ @@ -1086,37 +1093,43 @@ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ STUBGEN_PREUNIVERSE_BLOBS_DO(do_blob, end_blob, \ do_stub, \ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ STUBGEN_INITIAL_BLOBS_DO(do_blob, end_blob, \ do_stub, \ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ STUBGEN_CONTINUATION_BLOBS_DO(do_blob, end_blob, \ do_stub, \ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ STUBGEN_COMPILER_BLOBS_DO(do_blob, end_blob, \ do_stub, \ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ STUBGEN_FINAL_BLOBS_DO(do_blob, end_blob, \ do_stub, \ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ // Convenience macros for use by template implementations @@ -1166,6 +1179,9 @@ #define STUBGEN_COUNT5(_1, _2, _3, _4, count) \ + count +#define STUBGEN_COUNT6(_1, _2, _3, _4, _5, count) \ + + count + // Convenience templates that emit nothing // ignore do_blob(blob_name, type) declarations @@ -1204,7 +1220,8 @@ DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ 
DO_ENTRY_EMPTY5, \ DO_ARCH_BLOB_EMPTY2, \ - DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6, \ + DO_ARCH_ENTRY_EMPTY6) \ // client macro to operate only on StubGenerator stubs @@ -1214,7 +1231,8 @@ DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ DO_ENTRY_EMPTY5, \ DO_ARCH_BLOB_EMPTY2, \ - DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6, \ + DO_ARCH_ENTRY_EMPTY6) \ // client macros to operate only on StubGenerator blobs and stubs @@ -1224,18 +1242,21 @@ DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ DO_ENTRY_EMPTY5, \ DO_ARCH_BLOB_EMPTY2, \ - DO_ARCH_ENTRY_EMPTY5,DO_ARCH_ENTRY_EMPTY6) \ + DO_ARCH_ENTRY_EMPTY5,DO_ARCH_ENTRY_EMPTY6, \ + DO_ARCH_ENTRY_EMPTY6) \ // client macro to operate only on StubGenerator generci and arch entries #define STUBGEN_ALL_ENTRIES_DO(do_entry, do_entry_init, do_entry_array, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1, \ DO_STUB_EMPTY2, \ do_entry, do_entry_init, \ do_entry_array, \ DO_ARCH_BLOB_EMPTY2, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ // client macro to operate only on StubGenerator entries @@ -1245,7 +1266,8 @@ do_entry, do_entry_init, \ do_entry_array, \ DO_ARCH_BLOB_EMPTY2, \ - DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6, \ + DO_ARCH_ENTRY_EMPTY6) \ // client macro to operate only on StubGenerator arch blobs @@ -1255,16 +1277,19 @@ DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ DO_ENTRY_EMPTY5, \ do_arch_blob, \ - DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6, \ + DO_ARCH_ENTRY_EMPTY6) \ // client macro to operate only on StubGenerator arch entries -#define STUBGEN_ARCH_ENTRIES_DO(do_arch_entry, do_arch_entry_init) \ +#define STUBGEN_ARCH_ENTRIES_DO(do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ 
STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1, \ DO_STUB_EMPTY2, \ DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ DO_ENTRY_EMPTY5, \ DO_ARCH_BLOB_EMPTY2, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, do_arch_entry_init, \ + do_arch_entry_array) \ #endif // SHARE_RUNTIME_STUBDECLARATIONS_HPP diff --git a/src/hotspot/share/runtime/stubInfo.cpp b/src/hotspot/share/runtime/stubInfo.cpp index ee90631145a..4d4d865cf95 100644 --- a/src/hotspot/share/runtime/stubInfo.cpp +++ b/src/hotspot/share/runtime/stubInfo.cpp @@ -574,6 +574,18 @@ void StubInfo::process_stubgen_entry(StubGroup& group_cursor, field_name, id), \ 0); \ +#define PROCESS_STUBGEN_ENTRY_ARCH_ARRAY(arch_name, blob, stub, \ + field_name, getter_name, \ + count) \ + process_stubgen_entry(_group_cursor, _blob_cursor, \ + _stub_cursor, _entry_cursor, \ + #arch_name "_" # field_name "_entry (stub gen)", \ + BlobId:: JOIN3(stubgen, blob, id), \ + StubId:: JOIN3(stubgen, stub, id), \ + EntryId:: JOIN4(stubgen, arch_name, \ + field_name, id), \ + count); \ + void StubInfo::populate_stub_tables() { StubGroup _group_cursor; BlobId _blob_cursor = BlobId::NO_BLOBID; @@ -615,7 +627,8 @@ void StubInfo::populate_stub_tables() { PROCESS_STUBGEN_ENTRY, PROCESS_STUBGEN_ENTRY_INIT, PROCESS_STUBGEN_ENTRY_ARRAY, DO_ARCH_BLOB_EMPTY2, - PROCESS_STUBGEN_ENTRY_ARCH, PROCESS_STUBGEN_ENTRY_ARCH_INIT); + PROCESS_STUBGEN_ENTRY_ARCH, PROCESS_STUBGEN_ENTRY_ARCH_INIT, + PROCESS_STUBGEN_ENTRY_ARCH_ARRAY); assert(next(_blob_cursor) == BlobId::NUM_BLOBIDS, "should have exhausted all blob ids!"); assert(next(_stub_cursor) == StubId::NUM_STUBIDS, "should have exhausted all stub ids!"); assert(next(_entry_cursor) == EntryId::NUM_ENTRYIDS, "should have exhausted all entry ids!"); @@ -636,6 +649,7 @@ void StubInfo::populate_stub_tables() { #undef PROCESS_STUBGEN_ENTRY_ARRAY #undef PROCESS_STUBGEN_ENTRY_ARCH #undef PROCESS_STUBGEN_ENTRY_ARCH_INIT +#undef PROCESS_STUBGEN_ENTRY_ARCH_ARRAY #ifdef ASSERT @@ -1087,6 +1101,15 @@ int 
StubInfo::stubgen_offset(StubId id) { return local_offset(StubGroup::STUBGEN, id); } +int StubInfo::stubgen_offset_in_blob(BlobId blob_id, StubId id) { + assert(blob(id) == blob_id, "sanity!"); + StubGroup group = StubGroup::STUBGEN; + assert(stubgroup(blob_id) == group, "sanity"); + StubId base_id = stub_base(blob_id); + assert(base_id != StubId::NO_STUBID, "sanity"); + return local_offset(group, id) - local_offset(group, base_id); +} + // initialization function called to populate blob. stub and entry // tables. this must be called before any stubs are generated void initialize_stub_info() { diff --git a/src/hotspot/share/runtime/stubInfo.hpp b/src/hotspot/share/runtime/stubInfo.hpp index 9ed6e0cb9f9..2fe503a8d0e 100644 --- a/src/hotspot/share/runtime/stubInfo.hpp +++ b/src/hotspot/share/runtime/stubInfo.hpp @@ -349,6 +349,14 @@ enum class StubId : int { init_function) \ JOIN4(stubgen, arch_name, field_name, id), \ +#define STUBGEN_DECLARE_ARCH_ARRAY_TAG(arch_name, blob_name, stub_name, \ + field_name, getter_name, \ + count) \ + JOIN4(stubgen, arch_name, field_name, id), \ + JOIN4(stubgen, arch_name, field_name, max) = \ + JOIN4(stubgen, arch_name, field_name, id) + \ + count - 1, \ + // the above macros are enough to declare the enum enum class EntryId : int { @@ -366,7 +374,8 @@ enum class EntryId : int { STUBGEN_DECLARE_INIT_TAG, STUBGEN_DECLARE_ARRAY_TAG, STUBGEN_DECLARE_ARCH_TAG, - STUBGEN_DECLARE_ARCH_INIT_TAG) + STUBGEN_DECLARE_ARCH_INIT_TAG, + STUBGEN_DECLARE_ARCH_ARRAY_TAG) NUM_ENTRYIDS }; @@ -379,6 +388,7 @@ enum class EntryId : int { #undef STUBGEN_DECLARE_ARRAY_TAG #undef STUBGEN_DECLARE_ARCH_TAG #undef STUBGEN_DECLARE_ARCH_INIT_TAG +#undef STUBGEN_DECLARE_ARCH_ARRAY_TAG // we need static init expressions for blob, stub and entry counts in // each stubgroup @@ -404,7 +414,8 @@ enum class EntryId : int { #define STUBGEN_ENTRY_COUNT_INITIALIZER \ 0 STUBGEN_ALL_ENTRIES_DO(COUNT4, COUNT5, \ STUBGEN_COUNT5, \ - COUNT5, COUNT6) + COUNT5, COUNT6, \ + 
STUBGEN_COUNT6) // Declare management class StubInfo @@ -669,6 +680,11 @@ public: static int c1_offset(StubId id); static int c2_offset(StubId id); static int stubgen_offset(StubId id); + + // Convert a stub id to a unique, zero-based offset in the range of + // stub ids for a given blob in the stubgen stub group. + + static int stubgen_offset_in_blob(BlobId blob_id, StubId id); }; diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp index 5246613738e..f5509b9d996 100644 --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ b/src/hotspot/share/runtime/stubRoutines.cpp @@ -102,8 +102,7 @@ BlobId StubRoutines::stub_to_blob(StubId id) { // Initialization -extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id); // only interface to generators - +extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data); // only interface to generators void UnsafeMemoryAccess::create_table(int max_size) { UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size]; UnsafeMemoryAccess::_table_max_length = max_size; @@ -154,7 +153,8 @@ void UnsafeMemoryAccess::collect_entries(address range_start, address range_end, if (e._error_exit_pc != _common_exit_stub_pc) { entries.append(e._error_exit_pc); } else { - // an address outside the stub must be the common exit stub address + // an address outside the stub must be the common exit stub + // address which is marked with a null address entries.append(nullptr); } } @@ -169,6 +169,38 @@ static BufferBlob* initialize_stubs(BlobId blob_id, assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id)); ResourceMark rm; TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + // If we are loading stubs we need to check if we can retrieve a + // blob and/or an associated archived stub descriptor from the + // AOTCodeCache. 
If we are storing stubs we need to create a blob + // but we still need a stub data descriptor to fill in during + // generation. + AOTStubData stub_data(blob_id); + AOTStubData* stub_data_p = nullptr; + LogTarget(Info, stubs) lt; + + // we need to track and publish details of stubs in a stubgen blob + // when we are 1) using stubs from the cache 2) dumping stubs to the + // cache 3) generating stubs that may be needed by other cache + // elements. + + if (stub_data.is_open()) { + stub_data_p = &stub_data; + } + if (code_size > 0 && stub_data.is_using()) { + // try to load the blob and details of its stubs from cache. if + // that fails we will still generate all necessary stubs + if (stub_data.load_code_blob()) { + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print_cr("Found blob %s in AOT cache", StubInfo::name(blob_id)); + } + } + } + + // Even if we managed to load a blob from the AOT cache we still + // need to allocate a code blob and associated buffer. The AOT blob + // may not include all the stubs we need for this runtime. + // Add extra space for large CodeEntryAlignment int size = code_size + CodeEntryAlignment * max_aligned_stubs; BufferBlob* stubs_code = BufferBlob::create(buffer_name, size); @@ -178,6 +210,10 @@ static BufferBlob* initialize_stubs(BlobId blob_id, // In that case we can tolerate an allocation failure because the // compiler will have been shut down and we have no need of the // blob. + // TODO: Ideally we would still like to try to use any AOT cached + // blob here but we don't have a fallback if we find that it is + // missing stubs we need so for now we exit. This should only + // happen in cases where we have a very small code cache. 
if (Thread::current()->is_Compiler_thread()) { assert(blob_id == BlobId::stubgen_compiler_id, "sanity"); assert(DelayCompilerStubsGeneration, "sanity"); @@ -187,10 +223,12 @@ static BufferBlob* initialize_stubs(BlobId blob_id, vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name); } CodeBuffer buffer(stubs_code); - StubGenerator_generate(&buffer, blob_id); + short buffer_locs[20]; + buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, + sizeof(buffer_locs)/sizeof(relocInfo)); + StubGenerator_generate(&buffer, blob_id, stub_data_p); if (code_size == 0) { assert(buffer.insts_size() == 0, "should not write into buffer when bob size declared as 0"); - LogTarget(Info, stubs) lt; if (lt.is_enabled()) { LogStream ls(lt); ls.print_cr("%s\t not generated", buffer_name); @@ -203,7 +241,37 @@ static BufferBlob* initialize_stubs(BlobId blob_id, "increase %s, code_size: %d, used: %d, free: %d", assert_msg, code_size, buffer.total_content_size(), buffer.insts_remaining()); - LogTarget(Info, stubs) lt; + if (stub_data.is_dumping()) { + // save the blob and publish the entry addresses + if (stub_data.store_code_blob(*stubs_code, &buffer)) { + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print_cr("Stored blob '%s' to Startup Code Cache", buffer_name); + } + } else { + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print_cr("Failed to store blob '%s' to Startup Code Cache", buffer_name); + } + } + } else if (stub_data.is_open()) { + // we either loaded some entries or generated new entries so + // publish all entries + // + // TODO - ensure we publish collect and publish the preuniverse + // stubs but don't try to save them + AOTCodeCache::publish_stub_addresses(*stubs_code, blob_id, &stub_data); + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print_cr("Republished entries for blob '%s'", buffer_name); + } + } + + // close off recording of any further stubgen generation + if (blob_id == BlobId::stubgen_final_id) { + 
AOTCodeCache::set_stubgen_stubs_complete(); + } + if (lt.is_enabled()) { LogStream ls(lt); ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d", @@ -214,17 +282,19 @@ static BufferBlob* initialize_stubs(BlobId blob_id, return stubs_code; } +// per blob initializer methods StubRoutines::initialize_xxx_stubs() + #define DEFINE_BLOB_INIT_METHOD(blob_name) \ void StubRoutines::initialize_ ## blob_name ## _stubs() { \ if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) { \ BlobId blob_id = BlobId:: JOIN3(stubgen, blob_name, id); \ int size = _ ## blob_name ## _code_size; \ - int max_aligned_size = 10; \ + int max_aligned_stubs = StubInfo::stub_count(blob_id); \ const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \ const char* name = "StubRoutines (" # blob_name " stubs)"; \ const char* assert_msg = "_" # blob_name "_code_size"; \ STUBGEN_BLOB_FIELD_NAME(blob_name) = \ - initialize_stubs(blob_id, size, max_aligned_size, timer_msg, \ + initialize_stubs(blob_id, size, max_aligned_stubs, timer_msg, \ name, assert_msg); \ } \ } @@ -234,6 +304,7 @@ STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD) #undef DEFINE_BLOB_INIT_METHOD +// external driver API functions for per blob init: xxx_stubs_init() #define DEFINE_BLOB_INIT_FUNCTION(blob_name) \ void blob_name ## _stubs_init() { \ @@ -244,11 +315,18 @@ STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION) #undef DEFINE_BLOB_INIT_FUNCTION + +#if INCLUDE_CDS +// non-generated external API init driver function + +void stubs_AOTAddressTable_init() { StubRoutines::init_AOTAddressTable(); } +#endif // INCLUDE_CDS + /* - * we generate the underlying driver method but this wrapper is needed - * to perform special handling depending on where the compiler init - * gets called from. it ought to be possible to remove this at some - * point and have a determinate ordered init. 
+ * we generate the underlying driver function compiler_stubs_init() + * but this wrapper is needed to perform special handling depending on + * where the compiler init gets called from. it ought to be possible + * to remove this at some point and have a determinate ordered init. */ void compiler_stubs_init(bool in_compiler_thread) { diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp index 97e3e46b870..894bd47faab 100644 --- a/src/hotspot/share/runtime/stubRoutines.hpp +++ b/src/hotspot/share/runtime/stubRoutines.hpp @@ -112,6 +112,8 @@ class UnsafeMemoryAccess : public CHeapObj { address _end_pc; address _error_exit_pc; public: + // each table entry requires 3 addresses + static const int COLUMN_COUNT = 3; static address _common_exit_stub_pc; static UnsafeMemoryAccess* _table; static int _table_length; @@ -130,6 +132,7 @@ class UnsafeMemoryAccess : public CHeapObj { static UnsafeMemoryAccess* add_to_table(address start_pc, address end_pc, address error_exit_pc) { guarantee(_table_length < _table_max_length, "Incorrect UnsafeMemoryAccess::_table_max_length"); UnsafeMemoryAccess* entry = &_table[_table_length]; + assert(start_pc != nullptr, "invalid start address"); entry->set_start_pc(start_pc); entry->set_end_pc(end_pc); entry->set_error_exit_pc(error_exit_pc); @@ -283,6 +286,11 @@ public: static BlobId stub_to_blob(StubId id); #endif +#if INCLUDE_CDS + // AOT Initalization -- implementation is arch-specific + static void init_AOTAddressTable(); +#endif // INCLUDE_CDS + // Debugging static jint verify_oop_count() { return _verify_oop_count; } static jint* verify_oop_count_addr() { return &_verify_oop_count; } diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index c93e1a65512..fa178dcb5a1 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -350,7 +350,7 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, 
JavaThread* current, bool al } if (mark.has_monitor()) { - ObjectMonitor* const mon = read_monitor(current, obj, mark); + ObjectMonitor* const mon = read_monitor(obj, mark); if (mon == nullptr) { // Racing with inflation/deflation go slow path return false; @@ -485,7 +485,7 @@ ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj), // otherwise just force other vthreads to preempt in case they try // to acquire this monitor. _skip_exit = !_thread->preemption_cancelled(); - ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong(); + ObjectSynchronizer::read_monitor(_obj())->set_object_strong(); _thread->set_pending_preempted_exception(); } @@ -502,7 +502,7 @@ void ObjectLocker::wait_uninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread); if (_thread->preempting()) { _skip_exit = true; - ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong(); + ObjectSynchronizer::read_monitor(_obj())->set_object_strong(); _thread->set_pending_preempted_exception(); } } @@ -749,7 +749,7 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current, } while (mark.has_monitor()) { - ObjectMonitor* monitor = read_monitor(current, obj, mark); + ObjectMonitor* monitor = read_monitor(obj, mark); if (monitor != nullptr) { return monitor->is_entered(current) != 0; } @@ -778,7 +778,7 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob } while (mark.has_monitor()) { - ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark); + ObjectMonitor* monitor = read_monitor(obj, mark); if (monitor != nullptr) { return Threads::owning_thread_from_monitor(t_list, monitor); } @@ -830,8 +830,7 @@ void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure }); } -// Iterate ObjectMonitors where the owner == thread; this does NOT include -// ObjectMonitors where owner is set to a stack-lock address in thread. 
+// Iterate ObjectMonitors where the owner == thread. void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) { int64_t key = ObjectMonitor::owner_id_from(thread); auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; }; @@ -1416,7 +1415,11 @@ void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out, } const markWord mark = obj->mark(); - if (!mark.has_monitor()) { + // Note: When using ObjectMonitorTable we may observe an intermediate state, + // where the monitor is globally visible, but no thread has yet transitioned + // the markWord. To avoid reporting a false positive during this transition, we + // skip the `!mark.has_monitor()` test if we are using the ObjectMonitorTable. + if (!UseObjectMonitorTable && !mark.has_monitor()) { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " "object does not think it has a monitor: obj=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), @@ -1425,7 +1428,7 @@ void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out, return; } - ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark); + ObjectMonitor* const obj_mon = read_monitor(obj, mark); if (n != obj_mon) { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " "object does not refer to the same monitor: obj=" @@ -1471,8 +1474,8 @@ void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_ out->flush(); } -ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) { - ObjectMonitor* monitor = get_monitor_from_table(current, object); +ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, bool* inserted) { + ObjectMonitor* monitor = get_monitor_from_table(object); if (monitor != nullptr) { *inserted = false; return monitor; @@ -1482,7 +1485,7 @@ ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop 
object, alloced_monitor->set_anonymous_owner(); // Try insert monitor - monitor = add_monitor(current, alloced_monitor, object); + monitor = add_monitor(alloced_monitor, object); *inserted = alloced_monitor == monitor; if (!*inserted) { @@ -1522,7 +1525,7 @@ ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* EventJavaMonitorInflate event; bool inserted; - ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted); + ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, &inserted); if (inserted) { log_inflate(current, object, cause); @@ -1538,7 +1541,7 @@ ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* } // Add the hashcode to the monitor to match the object and put it in the hashtable. -ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) { +ObjectMonitor* ObjectSynchronizer::add_monitor(ObjectMonitor* monitor, oop obj) { assert(UseObjectMonitorTable, "must be"); assert(obj == monitor->object(), "must be"); @@ -1546,14 +1549,14 @@ ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonito assert(hash != 0, "must be set when claiming the object monitor"); monitor->set_hash(hash); - return ObjectMonitorTable::monitor_put_get(current, monitor, obj); + return ObjectMonitorTable::monitor_put_get(monitor, obj); } -void ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) { +void ObjectSynchronizer::remove_monitor(ObjectMonitor* monitor, oop obj) { assert(UseObjectMonitorTable, "must be"); assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead"); - ObjectMonitorTable::remove_monitor_entry(current, monitor); + ObjectMonitorTable::remove_monitor_entry(monitor); } void ObjectSynchronizer::deflate_mark_word(oop obj) { @@ -1721,7 +1724,7 @@ bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, Ja return true; } else if 
(observed_deflation) { // Spin while monitor is being deflated. - ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(obj, mark); return monitor == nullptr || monitor->is_being_async_deflated(); } // Else stop spinning. @@ -1881,7 +1884,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) if (UseObjectMonitorTable) { monitor = read_caches(current, lock, object); if (monitor == nullptr) { - monitor = get_monitor_from_table(current, object); + monitor = get_monitor_from_table(object); } } else { monitor = ObjectSynchronizer::read_monitor(mark); @@ -1925,7 +1928,7 @@ ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchro } assert(mark.has_monitor(), "must be"); - ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(obj, mark); if (monitor != nullptr) { if (monitor->has_anonymous_owner()) { LockStack& lock_stack = current->lock_stack(); @@ -1960,12 +1963,10 @@ ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, Object const markWord mark = object->mark_acquire(); // The mark can be in one of the following states: - // * inflated - Just return if using stack-locking. - // If using fast-locking and the ObjectMonitor owner - // is anonymous and the locking_thread owns the - // object lock, then we make the locking_thread - // the ObjectMonitor owner and remove the lock from - // the locking_thread's lock stack. + // * inflated - If the ObjectMonitor owner is anonymous and the + // locking_thread owns the object lock, then we make the + // locking_thread the ObjectMonitor owner and remove the + // lock from the locking_thread's lock stack. // * fast-locked - Coerce it to inflated from fast-locked. // * unlocked - Aggressively inflate the object. 
@@ -2087,7 +2088,7 @@ ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, Object // contains a deflating monitor it must be anonymously owned. if (monitor->has_anonymous_owner()) { // The monitor must be anonymously owned if it was added - assert(monitor == get_monitor_from_table(current, object), "The monitor must be found"); + assert(monitor == get_monitor_from_table(object), "The monitor must be found"); // New fresh monitor break; } @@ -2279,31 +2280,31 @@ ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock return monitor; } -void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) { +void ObjectSynchronizer::deflate_monitor(oop obj, ObjectMonitor* monitor) { if (obj != nullptr) { deflate_mark_word(obj); - remove_monitor(current, monitor, obj); + remove_monitor(monitor, obj); } } -ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop obj) { +ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(oop obj) { assert(UseObjectMonitorTable, "must be"); - return ObjectMonitorTable::monitor_get(current, obj); + return ObjectMonitorTable::monitor_get(obj); } ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) { return mark.monitor(); } -ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) { - return ObjectSynchronizer::read_monitor(current, obj, obj->mark()); +ObjectMonitor* ObjectSynchronizer::read_monitor(oop obj) { + return ObjectSynchronizer::read_monitor(obj, obj->mark()); } -ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) { +ObjectMonitor* ObjectSynchronizer::read_monitor(oop obj, markWord mark) { if (!UseObjectMonitorTable) { return read_monitor(mark); } else { - return ObjectSynchronizer::get_monitor_from_table(current, obj); + return ObjectSynchronizer::get_monitor_from_table(obj); } } diff --git a/src/hotspot/share/runtime/synchronizer.hpp 
b/src/hotspot/share/runtime/synchronizer.hpp index 97690b9c886..58d0e88a026 100644 --- a/src/hotspot/share/runtime/synchronizer.hpp +++ b/src/hotspot/share/runtime/synchronizer.hpp @@ -126,8 +126,8 @@ public: static const char* inflate_cause_name(const InflateCause cause); static ObjectMonitor* read_monitor(markWord mark); - static ObjectMonitor* read_monitor(Thread* current, oop obj); - static ObjectMonitor* read_monitor(Thread* current, oop obj, markWord mark); + static ObjectMonitor* read_monitor(oop obj); + static ObjectMonitor* read_monitor(oop obj, markWord mark); // Returns the identity hash value for an oop // NOTE: It may cause monitor inflation @@ -209,11 +209,11 @@ public: static void handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread); - static ObjectMonitor* get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted); + static ObjectMonitor* get_or_insert_monitor_from_table(oop object, bool* inserted); static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause); - static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj); - static void remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj); + static ObjectMonitor* add_monitor(ObjectMonitor* monitor, oop obj); + static void remove_monitor(ObjectMonitor* monitor, oop obj); static void deflate_mark_word(oop object); @@ -239,9 +239,9 @@ public: static ObjectMonitor* inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current); static ObjectMonitor* inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current); - static void deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor); + static void deflate_monitor(oop obj, ObjectMonitor* monitor); - static ObjectMonitor* get_monitor_from_table(Thread* current, oop obj); + 
static ObjectMonitor* get_monitor_from_table(oop obj); static bool contains_monitor(Thread* current, ObjectMonitor* monitor); diff --git a/src/hotspot/share/runtime/threadSMR.cpp b/src/hotspot/share/runtime/threadSMR.cpp index 418f7707118..4c68648fec8 100644 --- a/src/hotspot/share/runtime/threadSMR.cpp +++ b/src/hotspot/share/runtime/threadSMR.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -726,7 +726,8 @@ JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const { } } } - } else if (!thread->is_exiting()) { + } else if (includes(thread) && !thread->is_exiting()) { + // The thread is protected by this list and has not yet exited return thread; } return nullptr; @@ -883,7 +884,7 @@ void ThreadsSMRSupport::add_thread(JavaThread *thread){ ThreadsList *old_list = xchg_java_thread_list(new_list); free_list(old_list); - if (ThreadIdTable::is_initialized()) { + if (ThreadIdTable::is_initialized_acquire()) { jlong tid = SharedRuntime::get_java_tid(thread); ThreadIdTable::add_thread(tid, thread); } diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp index f7f755a37b3..b83389a1929 100644 --- a/src/hotspot/share/runtime/threads.cpp +++ b/src/hotspot/share/runtime/threads.cpp @@ -113,6 +113,7 @@ #endif #ifdef COMPILER2 #include "opto/idealGraphPrinter.hpp" +#include "runtime/hotCodeCollector.hpp" #endif #if INCLUDE_JFR #include "jfr/jfr.hpp" @@ -798,6 +799,12 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { StringDedup::start(); } +#ifdef COMPILER2 + if (HotCodeHeap) { + HotCodeCollector::initialize(); + } +#endif // COMPILER2 + // Pre-initialize some JSR292 core classes to avoid deadlock during class loading. 
// It is done after compilers are initialized, because otherwise compilations of // signature polymorphic MH intrinsics can be missed @@ -1127,7 +1134,7 @@ void Threads::remove(JavaThread* p, bool is_daemon) { ConditionalMutexLocker throttle_ml(ThreadsLockThrottle_lock, UseThreadsLockThrottleLock); MonitorLocker ml(Threads_lock); - if (ThreadIdTable::is_initialized()) { + if (ThreadIdTable::is_initialized_acquire()) { // This cleanup must be done before the current thread's GC barrier // is detached since we need to touch the threadObj oop. jlong tid = SharedRuntime::get_java_tid(p); diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp index 604ff1f751a..c9628255e45 100644 --- a/src/hotspot/share/runtime/vframe.cpp +++ b/src/hotspot/share/runtime/vframe.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -248,7 +248,7 @@ void javaVFrame::print_lock_info_on(outputStream* st, bool is_virtual, int frame // The first stage of async deflation does not affect any field // used by this comparison so the ObjectMonitor* is usable here. 
if (mark.has_monitor()) { - ObjectMonitor* mon = ObjectSynchronizer::read_monitor(current, monitor->owner(), mark); + ObjectMonitor* mon = ObjectSynchronizer::read_monitor(monitor->owner(), mark); if (// if the monitor is null we must be in the process of locking mon == nullptr || // we have marked ourself as pending on this monitor diff --git a/src/hotspot/share/runtime/vmOperation.hpp b/src/hotspot/share/runtime/vmOperation.hpp index 5140d0401fb..6078600c16e 100644 --- a/src/hotspot/share/runtime/vmOperation.hpp +++ b/src/hotspot/share/runtime/vmOperation.hpp @@ -90,7 +90,7 @@ template(ShenandoahFinalMarkStartEvac) \ template(ShenandoahInitUpdateRefs) \ template(ShenandoahFinalUpdateRefs) \ - template(ShenandoahFinalRoots) \ + template(ShenandoahFinalVerify) \ template(ShenandoahDegeneratedGC) \ template(Exit) \ template(LinuxDllLoad) \ diff --git a/src/hotspot/share/runtime/vmOperations.cpp b/src/hotspot/share/runtime/vmOperations.cpp index ef480f04c57..c4a77ce3275 100644 --- a/src/hotspot/share/runtime/vmOperations.cpp +++ b/src/hotspot/share/runtime/vmOperations.cpp @@ -276,9 +276,6 @@ void VM_ThreadDump::doit_epilogue() { } // Hash table of int64_t to a list of ObjectMonitor* owned by the JavaThread. -// The JavaThread's owner key is either a JavaThread* or a stack lock -// address in the JavaThread so we use "int64_t". 
-// class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView { private: static unsigned int ptr_hash(int64_t const& s1) { diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index a54fbc7e8ab..ad9463443b2 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -163,10 +163,9 @@ /******************************************************************/ \ \ volatile_nonstatic_field(oopDesc, _mark, markWord) \ - volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \ - volatile_nonstatic_field(oopDesc, _metadata._compressed_klass, narrowKlass) \ + volatile_nonstatic_field(oopDesc, _compressed_klass, narrowKlass) \ static_field(BarrierSet, _barrier_set, BarrierSet*) \ - nonstatic_field(ArrayKlass, _dimension, int) \ + nonstatic_field(ArrayKlass, _dimension, const int) \ volatile_nonstatic_field(ArrayKlass, _higher_dimension, ObjArrayKlass*) \ volatile_nonstatic_field(ArrayKlass, _lower_dimension, ArrayKlass*) \ nonstatic_field(BSMAttributeEntries, _offsets, Array*) \ @@ -335,12 +334,11 @@ nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \ - static_field(ThreadLocalAllocBuffer, _reserve_for_allocation_prefetch, int) \ - static_field(ThreadLocalAllocBuffer, _target_refills, unsigned) \ - nonstatic_field(ThreadLocalAllocBuffer, _number_of_refills, unsigned) \ + static_field(ThreadLocalAllocBuffer, _target_num_refills, unsigned) \ + nonstatic_field(ThreadLocalAllocBuffer, _num_refills, unsigned) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste, unsigned) \ nonstatic_field(ThreadLocalAllocBuffer, _gc_waste, unsigned) \ - nonstatic_field(ThreadLocalAllocBuffer, _slow_allocations, unsigned) \ + nonstatic_field(ThreadLocalAllocBuffer, _num_slow_allocations, unsigned) \ nonstatic_field(VirtualSpace, _low_boundary, char*) 
\ nonstatic_field(VirtualSpace, _high_boundary, char*) \ nonstatic_field(VirtualSpace, _low, char*) \ @@ -476,6 +474,7 @@ /***********************************/ \ \ static_field(StubRoutines, _call_stub_return_address, address) \ + static_field(StubRoutines, _cont_returnBarrier, address) \ \ /***************************************/ \ /* PcDesc and other compiled code info */ \ @@ -787,6 +786,7 @@ static_field(Mutex, _mutex_array, Mutex**) \ static_field(Mutex, _num_mutex, int) \ volatile_nonstatic_field(Mutex, _owner, Thread*) \ + nonstatic_field(ContinuationEntry, _parent, ContinuationEntry*) \ static_field(ContinuationEntry, _return_pc, address) //-------------------------------------------------------------------------------- @@ -2143,4 +2143,3 @@ void vmStructs_init() { VMStructs::init(); } #endif // ASSERT - diff --git a/src/hotspot/share/sanitizers/address.cpp b/src/hotspot/share/sanitizers/address.cpp index 7d129feab0a..26de106ebcb 100644 --- a/src/hotspot/share/sanitizers/address.cpp +++ b/src/hotspot/share/sanitizers/address.cpp @@ -30,6 +30,7 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/vmError.hpp" +#ifndef _WINDOWS #include #include @@ -118,4 +119,11 @@ void Asan::report(outputStream* st) { } } +#else // defined windows + +void Asan::initialize() {} +bool Asan::had_error() { return false; } +void Asan::report(outputStream* st) {} +#endif // ifndef _WINDOWS + #endif // ADDRESS_SANITIZER diff --git a/src/hotspot/share/services/diagnosticCommand.cpp b/src/hotspot/share/services/diagnosticCommand.cpp index 0846f339227..f2fa114133e 100644 --- a/src/hotspot/share/services/diagnosticCommand.cpp +++ b/src/hotspot/share/services/diagnosticCommand.cpp @@ -99,6 +99,7 @@ void DCmd::register_dcmds() { DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export)); + DCmdFactory::register_DCmdFactory(new 
DCmdFactoryImpl(full_export)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export)); @@ -334,8 +335,8 @@ void JVMTIAgentLoadDCmd::execute(DCmdSource source, TRAPS) { #endif // INCLUDE_JVMTI #endif // INCLUDE_SERVICES -void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) { - // load VMSupport +// helper method for printing system and security properties +static void print_properties(Symbol* method_name, outputStream* out, TRAPS) { Symbol* klass = vmSymbols::jdk_internal_vm_VMSupport(); Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK); InstanceKlass* ik = InstanceKlass::cast(k); @@ -343,39 +344,36 @@ void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) { ik->initialize(THREAD); } if (HAS_PENDING_EXCEPTION) { - java_lang_Throwable::print(PENDING_EXCEPTION, output()); - output()->cr(); + java_lang_Throwable::print(PENDING_EXCEPTION, out); + out->cr(); CLEAR_PENDING_EXCEPTION; return; } - - // invoke the serializePropertiesToByteArray method JavaValue result(T_OBJECT); JavaCallArguments args; - Symbol* signature = vmSymbols::void_byte_array_signature(); - JavaCalls::call_static(&result, - ik, - vmSymbols::serializePropertiesToByteArray_name(), - signature, - &args, - THREAD); + JavaCalls::call_static(&result, ik, method_name, signature, &args, THREAD); + if (HAS_PENDING_EXCEPTION) { - java_lang_Throwable::print(PENDING_EXCEPTION, output()); - output()->cr(); + java_lang_Throwable::print(PENDING_EXCEPTION, out); + out->cr(); CLEAR_PENDING_EXCEPTION; return; } - - // The result should be a [B oop res = result.get_oop(); - assert(res->is_typeArray(), "just checking"); - assert(TypeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "just checking"); - - // copy the bytes to the output stream + assert(res->is_typeArray(), "should be a byte array"); + 
assert(TypeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "should be a byte array"); typeArrayOop ba = typeArrayOop(res); - jbyte* addr = typeArrayOop(res)->byte_at_addr(0); - output()->print_raw((const char*)addr, ba->length()); + jbyte* addr = ba->byte_at_addr(0); + out->print_raw((const char*)addr, ba->length()); +} + +void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) { + print_properties(vmSymbols::serializePropertiesToByteArray_name(), output(), THREAD); +} + +void PrintSecurityPropertiesDCmd::execute(DCmdSource source, TRAPS) { + print_properties(vmSymbols::serializeSecurityPropertiesToByteArray_name(), output(), THREAD); } VMUptimeDCmd::VMUptimeDCmd(outputStream* output, bool heap) : diff --git a/src/hotspot/share/services/diagnosticCommand.hpp b/src/hotspot/share/services/diagnosticCommand.hpp index c41e7bf2e2e..97ceb19d0ad 100644 --- a/src/hotspot/share/services/diagnosticCommand.hpp +++ b/src/hotspot/share/services/diagnosticCommand.hpp @@ -94,6 +94,15 @@ public: virtual void execute(DCmdSource source, TRAPS); }; +class PrintSecurityPropertiesDCmd : public DCmd { +public: + PrintSecurityPropertiesDCmd(outputStream* output, bool heap) : DCmd(output, heap) { } + static const char* name() { return "VM.security_properties"; } + static const char* description() { return "Print java.security.Security properties."; } + static const char* impact() { return "Low"; } + virtual void execute(DCmdSource source, TRAPS); +}; + // See also: print_flag in attachListener.cpp class PrintVMFlagsDCmd : public DCmdWithParser { protected: diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp index 09277e16479..664fb5a8ef3 100644 --- a/src/hotspot/share/services/management.cpp +++ b/src/hotspot/share/services/management.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1146,6 +1146,7 @@ JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jo // create dummy snapshot dump_result.add_thread_snapshot(); } else { + assert(dump_result.t_list()->includes(jt), "Must be protected"); dump_result.add_thread_snapshot(jt); } } diff --git a/src/hotspot/share/services/memoryService.cpp b/src/hotspot/share/services/memoryService.cpp index f64da3c5477..4636f125079 100644 --- a/src/hotspot/share/services/memoryService.cpp +++ b/src/hotspot/share/services/memoryService.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,11 +119,11 @@ void MemoryService::add_metaspace_memory_pools() { mgr->add_pool(_metaspace_pool); _pools_list->append(_metaspace_pool); - if (UseCompressedClassPointers) { - _compressed_class_pool = new CompressedKlassSpacePool(); - mgr->add_pool(_compressed_class_pool); - _pools_list->append(_compressed_class_pool); - } +#if INCLUDE_CLASS_SPACE + _compressed_class_pool = new CompressedKlassSpacePool(); + mgr->add_pool(_compressed_class_pool); + _pools_list->append(_compressed_class_pool); +#endif _managers_list->append(mgr); } diff --git a/src/hotspot/share/services/threadIdTable.cpp b/src/hotspot/share/services/threadIdTable.cpp index 1604927a0ac..585cc6377ec 100644 --- a/src/hotspot/share/services/threadIdTable.cpp +++ b/src/hotspot/share/services/threadIdTable.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ */ #include "classfile/javaClasses.inline.hpp" -#include "runtime/atomicAccess.hpp" +#include "runtime/handles.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/threadSMR.hpp" @@ -44,7 +44,7 @@ static ThreadIdTableHash* volatile _local_table = nullptr; static volatile size_t _current_size = 0; static volatile size_t _items_count = 0; -volatile bool ThreadIdTable::_is_initialized = false; +Atomic ThreadIdTable::_is_initialized {false}; volatile bool ThreadIdTable::_has_work = false; class ThreadIdTableEntry : public CHeapObj { @@ -81,24 +81,25 @@ class ThreadIdTableConfig : public AllStatic { // Lazily creates the table and populates it with the given // thread list void ThreadIdTable::lazy_initialize(const ThreadsList *threads) { - if (!_is_initialized) { + if (!_is_initialized.load_acquire()) { { // There is no obvious benefit in allowing the thread table // to be concurrently populated during initialization. MutexLocker ml(ThreadIdTableCreate_lock); - if (_is_initialized) { + if (_is_initialized.load_relaxed()) { return; } create_table(threads->length()); - _is_initialized = true; + _is_initialized.release_store(true); } + for (uint i = 0; i < threads->length(); i++) { JavaThread* thread = threads->thread_at(i); - oop tobj = thread->threadObj(); + Handle tobj = Handle(JavaThread::current(), thread->threadObj()); if (tobj != nullptr) { - jlong java_tid = java_lang_Thread::thread_id(tobj); MutexLocker ml(Threads_lock); if (!thread->is_exiting()) { + jlong java_tid = java_lang_Thread::thread_id(tobj()); // Must be inside the lock to ensure that we don't add a thread to the table // that has just passed the removal point in Threads::remove(). 
add_thread(java_tid, thread); @@ -211,7 +212,7 @@ public: }; void ThreadIdTable::do_concurrent_work(JavaThread* jt) { - assert(_is_initialized, "Thread table is not initialized"); + assert(_is_initialized.load_relaxed(), "Thread table is not initialized"); _has_work = false; double load_factor = get_load_factor(); log_debug(thread, table)("Concurrent work, load factor: %g", load_factor); @@ -221,7 +222,8 @@ void ThreadIdTable::do_concurrent_work(JavaThread* jt) { } JavaThread* ThreadIdTable::add_thread(jlong tid, JavaThread* java_thread) { - assert(_is_initialized, "Thread table is not initialized"); + assert(Threads_lock->owned_by_self(), "Must hold Threads_lock"); + assert(_is_initialized.load_relaxed(), "Thread table is not initialized"); Thread* thread = Thread::current(); ThreadIdTableLookup lookup(tid); ThreadGet tg; @@ -240,7 +242,7 @@ JavaThread* ThreadIdTable::add_thread(jlong tid, JavaThread* java_thread) { } JavaThread* ThreadIdTable::find_thread_by_tid(jlong tid) { - assert(_is_initialized, "Thread table is not initialized"); + assert(_is_initialized.load_relaxed(), "Thread table is not initialized"); Thread* thread = Thread::current(); ThreadIdTableLookup lookup(tid); ThreadGet tg; @@ -249,7 +251,8 @@ JavaThread* ThreadIdTable::find_thread_by_tid(jlong tid) { } bool ThreadIdTable::remove_thread(jlong tid) { - assert(_is_initialized, "Thread table is not initialized"); + assert(Threads_lock->owned_by_self(), "Must hold Threads_lock"); + assert(_is_initialized.load_relaxed(), "Thread table is not initialized"); Thread* thread = Thread::current(); ThreadIdTableLookup lookup(tid); return _local_table->remove(thread, lookup); diff --git a/src/hotspot/share/services/threadIdTable.hpp b/src/hotspot/share/services/threadIdTable.hpp index 15dfb89d670..256484cce2d 100644 --- a/src/hotspot/share/services/threadIdTable.hpp +++ b/src/hotspot/share/services/threadIdTable.hpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2019, 2025, Oracle and/or its affiliates. 
All rights reserved. +* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #define SHARE_SERVICES_THREADIDTABLE_HPP #include "memory/allStatic.hpp" +#include "runtime/atomic.hpp" class JavaThread; class ThreadsList; @@ -34,13 +35,15 @@ class ThreadIdTableConfig; class ThreadIdTable : public AllStatic { friend class ThreadIdTableConfig; - static volatile bool _is_initialized; + static Atomic _is_initialized; static volatile bool _has_work; public: // Initialization static void lazy_initialize(const ThreadsList* threads); - static bool is_initialized() { return _is_initialized; } + static bool is_initialized_acquire() { + return _is_initialized.load_acquire(); + } // Lookup and list management static JavaThread* find_thread_by_tid(jlong tid); diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp index 4f511075967..35c7de48d2e 100644 --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -1252,7 +1252,7 @@ private: // The first stage of async deflation does not affect any field // used by this comparison so the ObjectMonitor* is usable here. if (mark.has_monitor()) { - ObjectMonitor* mon = ObjectSynchronizer::read_monitor(current, monitor->owner(), mark); + ObjectMonitor* mon = ObjectSynchronizer::read_monitor(monitor->owner(), mark); if (// if the monitor is null we must be in the process of locking mon == nullptr || // we have marked ourself as pending on this monitor diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp index 5ee462bbe47..17bf437ecae 100644 --- a/src/hotspot/share/utilities/bitMap.hpp +++ b/src/hotspot/share/utilities/bitMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "utilities/globalDefinitions.hpp" // Forward decl; +class Arena; class BitMapClosure; // Operations for bitmaps represented as arrays of unsigned integers. diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp index 504b923237e..23e8281f000 100644 --- a/src/hotspot/share/utilities/debug.cpp +++ b/src/hotspot/share/utilities/debug.cpp @@ -40,7 +40,7 @@ #include "nmt/memTracker.hpp" #include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomicAccess.hpp" +#include "runtime/atomic.hpp" #include "runtime/flags/flagSetting.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" @@ -80,7 +80,7 @@ static char g_dummy; char* g_assert_poison = &g_dummy; const char* g_assert_poison_read_only = &g_dummy; -static intx g_asserting_thread = 0; +static Atomic g_asserting_thread{0}; #endif // CAN_SHOW_REGISTERS_ON_ASSERT int DebuggingContext::_enabled = 0; // Initially disabled. 
@@ -193,7 +193,7 @@ void report_vm_error(const char* file, int line, const char* error_msg, const ch const void* siginfo = nullptr; #ifdef CAN_SHOW_REGISTERS_ON_ASSERT - if (os::current_thread_id() == g_asserting_thread) { + if (os::current_thread_id() == g_asserting_thread.load_relaxed()) { context = os::get_saved_assert_context(&siginfo); } #endif // CAN_SHOW_REGISTERS_ON_ASSERT @@ -220,7 +220,7 @@ void report_fatal(VMErrorType error_type, const char* file, int line, const char const void* siginfo = nullptr; #ifdef CAN_SHOW_REGISTERS_ON_ASSERT - if (os::current_thread_id() == g_asserting_thread) { + if (os::current_thread_id() == g_asserting_thread.load_relaxed()) { context = os::get_saved_assert_context(&siginfo); } #endif // CAN_SHOW_REGISTERS_ON_ASSERT @@ -265,15 +265,15 @@ void report_untested(const char* file, int line, const char* message) { } void report_java_out_of_memory(const char* message) { - static int out_of_memory_reported = 0; + static Atomic out_of_memory_reported{false}; JFR_ONLY(Jfr::on_report_java_out_of_memory();) // A number of threads may attempt to report OutOfMemoryError at around the // same time. To avoid dumping the heap or executing the data collection - // commands multiple times we just do it once when the first threads reports + // commands multiple times we just do it once when the first thread that reports // the error. 
- if (AtomicAccess::cmpxchg(&out_of_memory_reported, 0, 1) == 0) { + if (out_of_memory_reported.compare_set(false, true)) { // create heap dump before OnOutOfMemoryError commands are executed if (HeapDumpOnOutOfMemoryError) { tty->print_cr("java.lang.OutOfMemoryError: %s", message); @@ -342,20 +342,20 @@ class Command : public StackObj { int Command::level = 0; -extern "C" DEBUGEXPORT void blob(CodeBlob* cb) { +extern "C" NOINLINE void blob(CodeBlob* cb) { Command c("blob"); cb->print(); } -extern "C" DEBUGEXPORT void dump_vtable(address p) { +extern "C" NOINLINE void dump_vtable(address p) { Command c("dump_vtable"); Klass* k = (Klass*)p; k->vtable().print(); } -extern "C" DEBUGEXPORT void nm(intptr_t p) { +extern "C" NOINLINE void nm(intptr_t p) { // Actually we look through all CodeBlobs (the nm name has been kept for backwards compatibility) Command c("nm"); CodeBlob* cb = CodeCache::find_blob((address)p); @@ -367,7 +367,7 @@ extern "C" DEBUGEXPORT void nm(intptr_t p) { } -extern "C" DEBUGEXPORT void disnm(intptr_t p) { +extern "C" NOINLINE void disnm(intptr_t p) { Command c("disnm"); CodeBlob* cb = CodeCache::find_blob((address) p); if (cb != nullptr) { @@ -382,7 +382,7 @@ extern "C" DEBUGEXPORT void disnm(intptr_t p) { } -extern "C" DEBUGEXPORT void printnm(intptr_t p) { +extern "C" NOINLINE void printnm(intptr_t p) { char buffer[256]; os::snprintf_checked(buffer, sizeof(buffer), "printnm: " INTPTR_FORMAT, p); Command c(buffer); @@ -396,14 +396,14 @@ extern "C" DEBUGEXPORT void printnm(intptr_t p) { } -extern "C" DEBUGEXPORT void universe() { +extern "C" NOINLINE void universe() { Command c("universe"); if (!c.onThread()) return; Universe::print_on(tty); } -extern "C" DEBUGEXPORT void verify() { +extern "C" NOINLINE void verify() { // try to run a verify on the entire system // note: this may not be safe if we're not at a safepoint; for debugging, // this manipulates the safepoint settings to avoid assertion failures @@ -421,7 +421,7 @@ extern "C" DEBUGEXPORT 
void verify() { } -extern "C" DEBUGEXPORT void pp(void* p) { +extern "C" NOINLINE void pp(void* p) { Command c("pp"); if (!c.onThread()) return; FlagSetting fl(DisplayVMOutput, true); @@ -445,7 +445,7 @@ extern "C" DEBUGEXPORT void pp(void* p) { } -extern "C" DEBUGEXPORT void ps() { // print stack +extern "C" NOINLINE void ps() { // print stack // Prints the stack of the current Java thread Command c("ps"); if (!c.onThread()) return; @@ -477,7 +477,7 @@ extern "C" DEBUGEXPORT void ps() { // print stack } } -extern "C" DEBUGEXPORT void pfl() { +extern "C" NOINLINE void pfl() { // print frame layout Command c("pfl"); if (!c.onThread()) return; @@ -494,7 +494,7 @@ extern "C" DEBUGEXPORT void pfl() { } } -extern "C" DEBUGEXPORT void psf() { // print stack frames +extern "C" NOINLINE void psf() { // print stack frames Command c("psf"); if (!c.onThread()) return; JavaThread* p = JavaThread::active(); @@ -511,21 +511,21 @@ extern "C" DEBUGEXPORT void psf() { // print stack frames } -extern "C" DEBUGEXPORT void threads() { +extern "C" NOINLINE void threads() { Command c("threads"); if (!c.onThread()) return; Threads::print(false, true); } -extern "C" DEBUGEXPORT void psd() { +extern "C" NOINLINE void psd() { Command c("psd"); if (!c.onThread()) return; SystemDictionary::print(); } -extern "C" DEBUGEXPORT void pss() { // print all stacks +extern "C" NOINLINE void pss() { // print all stacks Command c("pss"); if (!c.onThread()) return; Threads::print(true, PRODUCT_ONLY(false) NOT_PRODUCT(true)); @@ -533,7 +533,7 @@ extern "C" DEBUGEXPORT void pss() { // print all stacks // #ifndef PRODUCT -extern "C" DEBUGEXPORT void debug() { // to set things up for compiler debugging +extern "C" NOINLINE void debug() { // to set things up for compiler debugging Command c("debug"); NOT_PRODUCT(WizardMode = true;) PrintCompilation = true; @@ -542,7 +542,7 @@ extern "C" DEBUGEXPORT void debug() { // to set things up for comp } -extern "C" DEBUGEXPORT void ndebug() { // undo debug() +extern 
"C" NOINLINE void ndebug() { // undo debug() Command c("ndebug"); PrintCompilation = false; PrintInlining = PrintAssembly = false; @@ -550,36 +550,36 @@ extern "C" DEBUGEXPORT void ndebug() { // undo debug() } -extern "C" DEBUGEXPORT void flush() { +extern "C" NOINLINE void flush() { Command c("flush"); tty->flush(); } -extern "C" DEBUGEXPORT void events() { +extern "C" NOINLINE void events() { Command c("events"); Events::print(); } -extern "C" DEBUGEXPORT Method* findm(intptr_t pc) { +extern "C" NOINLINE Method* findm(intptr_t pc) { Command c("findm"); nmethod* nm = CodeCache::find_nmethod((address)pc); return (nm == nullptr) ? (Method*)nullptr : nm->method(); } -extern "C" DEBUGEXPORT nmethod* findnm(intptr_t addr) { +extern "C" NOINLINE nmethod* findnm(intptr_t addr) { Command c("findnm"); return CodeCache::find_nmethod((address)addr); } -extern "C" DEBUGEXPORT void find(intptr_t x) { +extern "C" NOINLINE void find(intptr_t x) { Command c("find"); if (!c.onThread()) return; os::print_location(tty, x, false); } -extern "C" DEBUGEXPORT void findpc(intptr_t x) { +extern "C" NOINLINE void findpc(intptr_t x) { Command c("findpc"); if (!c.onThread()) return; os::print_location(tty, x, true); @@ -591,15 +591,14 @@ extern "C" DEBUGEXPORT void findpc(intptr_t x) { // call findclass("java/lang/Object", 0x3) -> find j.l.Object and disasm all of its methods // call findmethod("*ang/Object*", "wait", 0xff) -> detailed disasm of all "wait" methods in j.l.Object // call findmethod("*ang/Object*", "wait:(*J*)V", 0x1) -> list all "wait" methods in j.l.Object that have a long parameter -extern "C" DEBUGEXPORT void findclass(const char* class_name_pattern, int flags) { +extern "C" NOINLINE void findclass(const char* class_name_pattern, int flags) { Command c("findclass"); if (!c.onThread()) return; ClassPrinter::print_flags_help(tty); ClassPrinter::print_classes(class_name_pattern, flags, tty); } -extern "C" DEBUGEXPORT void findmethod(const char* class_name_pattern, - const 
char* method_pattern, int flags) { +extern "C" NOINLINE void findmethod(const char* class_name_pattern, const char* method_pattern, int flags) { Command c("findmethod"); if (!c.onThread()) return; ClassPrinter::print_flags_help(tty); @@ -607,7 +606,7 @@ extern "C" DEBUGEXPORT void findmethod(const char* class_name_pattern, } // Need method pointer to find bcp -extern "C" DEBUGEXPORT void findbcp(intptr_t method, intptr_t bcp) { +extern "C" NOINLINE void findbcp(intptr_t method, intptr_t bcp) { Command c("findbcp"); Method* mh = (Method*)method; if (!mh->is_native()) { @@ -618,7 +617,7 @@ extern "C" DEBUGEXPORT void findbcp(intptr_t method, intptr_t bcp) { } // check and decode a single u5 value -extern "C" DEBUGEXPORT u4 u5decode(intptr_t addr) { +extern "C" NOINLINE u4 u5decode(intptr_t addr) { Command c("u5decode"); u1* arr = (u1*)addr; size_t off = 0, lim = 5; @@ -635,9 +634,7 @@ extern "C" DEBUGEXPORT u4 u5decode(intptr_t addr) { // there is no limit on the count of items printed; the // printing stops when an null is printed or at limit. // See documentation for UNSIGNED5::Reader::print(count). 
-extern "C" DEBUGEXPORT intptr_t u5p(intptr_t addr, - intptr_t limit, - int count) { +extern "C" NOINLINE intptr_t u5p(intptr_t addr, intptr_t limit, int count) { Command c("u5p"); u1* arr = (u1*)addr; if (limit && limit < addr) limit = addr; @@ -650,10 +647,10 @@ extern "C" DEBUGEXPORT intptr_t u5p(intptr_t addr, // int versions of all methods to avoid having to type type casts in the debugger -void pp(intptr_t p) { pp((void*)p); } -void pp(oop p) { pp((void*)p); } +NOINLINE void pp(intptr_t p) { pp((void*)p); } +NOINLINE void pp(oop p) { pp((void*)p); } -extern "C" DEBUGEXPORT void help() { +extern "C" NOINLINE void help() { Command c("help"); tty->print_cr("basic"); tty->print_cr(" pp(void* p) - try to make sense of p"); @@ -709,7 +706,7 @@ extern "C" DEBUGEXPORT void help() { } #ifndef PRODUCT -extern "C" DEBUGEXPORT void pns(void* sp, void* fp, void* pc) { // print native stack +extern "C" NOINLINE void pns(void* sp, void* fp, void* pc) { // print native stack Command c("pns"); if (!c.onThread()) return; static char buf[O_BUFLEN]; @@ -728,7 +725,7 @@ extern "C" DEBUGEXPORT void pns(void* sp, void* fp, void* pc) { // print native // WARNING: Only intended for use when debugging. Do not leave calls to // pns2() in committed source (product or debug). 
// -extern "C" DEBUGEXPORT void pns2() { // print native stack +extern "C" NOINLINE void pns2() { // print native stack Command c("pns2"); if (!c.onThread()) return; static char buf[O_BUFLEN]; @@ -739,6 +736,43 @@ extern "C" DEBUGEXPORT void pns2() { // print native stack } #endif +// just an exported helper; to avoid link time elimination of the referenced functions +extern "C" JNIEXPORT void JVM_debug_helpers_keeper(void* p1, void* p2, void* p3, intptr_t ip, oop oh, address adr) { + blob((CodeBlob*)p1); + dump_vtable(adr); + nm(ip); + disnm(ip); + printnm(ip); + universe(); + verify(); + pp(p1); + ps(); + pfl(); + psf(); + threads(); + psd(); + pss(); + debug(); + ndebug(); + flush(); + events(); + findm(ip); + findnm(ip); + find(ip); + findpc(ip); + findclass("", 0); + findmethod("", "", 0); + findbcp(ip, ip); + u5decode(ip); + u5p(ip, ip, 0); + pp(ip); + pp(oh); + help(); +#ifndef PRODUCT + pns(p1, p2, p3); + pns2(); +#endif +} // Returns true iff the address p is readable and *(intptr_t*)p != errvalue extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue) { @@ -813,7 +847,7 @@ bool handle_assert_poison_fault(const void* ucVoid) { if (ucVoid != nullptr) { // Save context. const intx my_tid = os::current_thread_id(); - if (AtomicAccess::cmpxchg(&g_asserting_thread, (intx)0, my_tid) == 0) { + if (g_asserting_thread.compare_set(0, my_tid)) { os::save_assert_context(ucVoid); } } diff --git a/src/hotspot/share/utilities/dtrace_disabled.hpp b/src/hotspot/share/utilities/dtrace_disabled.hpp index 6cbd79326ac..99d7cbb2f75 100644 --- a/src/hotspot/share/utilities/dtrace_disabled.hpp +++ b/src/hotspot/share/utilities/dtrace_disabled.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -683,6 +683,10 @@ #define HOTSPOT_JNI_GETMETHODID_ENTRY_ENABLED() 0 #define HOTSPOT_JNI_GETMETHODID_RETURN(arg0) #define HOTSPOT_JNI_GETMETHODID_RETURN_ENABLED() 0 +#define HOTSPOT_JNI_GETMODULE_ENTRY(arg0, arg1) +#define HOTSPOT_JNI_GETMODULE_ENTRY_ENABLED() 0 +#define HOTSPOT_JNI_GETMODULE_RETURN(arg0) +#define HOTSPOT_JNI_GETMODULE_RETURN_ENABLED() 0 #define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(arg0, arg1, arg2) #define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY_ENABLED() 0 #define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN(arg0) @@ -811,6 +815,10 @@ #define HOTSPOT_JNI_ISSAMEOBJECT_ENTRY_ENABLED() 0 #define HOTSPOT_JNI_ISSAMEOBJECT_RETURN(arg0) #define HOTSPOT_JNI_ISSAMEOBJECT_RETURN_ENABLED() 0 +#define HOTSPOT_JNI_ISVIRTUALTHREAD_ENTRY(arg0, arg1) +#define HOTSPOT_JNI_ISVIRTUALTHREAD_ENTRY_ENABLED() 0 +#define HOTSPOT_JNI_ISVIRTUALTHREAD_RETURN(arg0) +#define HOTSPOT_JNI_ISVIRTUALTHREAD_RETURN_ENABLED() 0 #define HOTSPOT_JNI_MONITORENTER_ENTRY(arg0, arg1) #define HOTSPOT_JNI_MONITORENTER_ENTRY_ENABLED() 0 #define HOTSPOT_JNI_MONITORENTER_RETURN(arg0) @@ -1080,12 +1088,6 @@ #define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(arg0) #define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN_ENABLED() 0 -/* Modules */ -#define HOTSPOT_JNI_GETMODULE_ENTRY(arg0, arg1) -#define HOTSPOT_JNI_GETMODULE_ENTRY_ENABLED() 0 -#define HOTSPOT_JNI_GETMODULE_RETURN(arg0) -#define HOTSPOT_JNI_GETMODULE_RETURN_ENABLED() - #else /* !defined(DTRACE_ENABLED) */ #error This file should only be included when dtrace is not enabled #endif /* !defined(DTRACE_ENABLED) */ diff --git a/src/hotspot/share/utilities/events.cpp b/src/hotspot/share/utilities/events.cpp index 6adb5311cb5..d2b8e7ba5da 100644 --- a/src/hotspot/share/utilities/events.cpp +++ b/src/hotspot/share/utilities/events.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,14 +26,15 @@ #include "memory/allocation.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/symbol.hpp" -#include "runtime/atomicAccess.hpp" +#include "runtime/atomic.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/osThread.hpp" #include "runtime/timer.hpp" #include "utilities/events.hpp" -EventLog* Events::_logs = nullptr; +static Atomic event_logs_list{}; + StringEventLog* Events::_messages = nullptr; StringEventLog* Events::_memprotect_messages = nullptr; StringEventLog* Events::_nmethod_flush_messages = nullptr; @@ -51,15 +52,15 @@ EventLog::EventLog() { // but use lock free add because there are some events that are created later. EventLog* old_head; do { - old_head = AtomicAccess::load(&Events::_logs); + old_head = event_logs_list.load_relaxed(); _next = old_head; - } while (AtomicAccess::cmpxchg(&Events::_logs, old_head, this) != old_head); + } while (!event_logs_list.compare_set(old_head, this)); } // For each registered event logger, print out the current contents of // the buffer. void Events::print_all(outputStream* out, int max) { - EventLog* log = AtomicAccess::load(&Events::_logs); + EventLog* log = event_logs_list.load_relaxed(); while (log != nullptr) { log->print_log_on(out, max); log = log->next(); @@ -68,7 +69,7 @@ void Events::print_all(outputStream* out, int max) { // Print a single event log specified by name. 
void Events::print_one(outputStream* out, const char* log_name, int max) { - EventLog* log = AtomicAccess::load(&Events::_logs); + EventLog* log = event_logs_list.load_relaxed(); int num_printed = 0; while (log != nullptr) { if (log->matches_name_or_handle(log_name)) { @@ -81,7 +82,7 @@ void Events::print_one(outputStream* out, const char* log_name, int max) { if (num_printed == 0) { out->print_cr("The name \"%s\" did not match any known event log. " "Valid event log names are:", log_name); - EventLog* log = AtomicAccess::load(&Events::_logs); + EventLog* log = event_logs_list.load_relaxed(); while (log != nullptr) { log->print_names(out); out->cr(); diff --git a/src/hotspot/share/utilities/events.hpp b/src/hotspot/share/utilities/events.hpp index cbbed7232fb..0d1c08548ff 100644 --- a/src/hotspot/share/utilities/events.hpp +++ b/src/hotspot/share/utilities/events.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -214,10 +214,7 @@ class ExceptionsEventLog : public ExtendedStringEventLog { class Events : AllStatic { - friend class EventLog; - private: - static EventLog* _logs; // A log for generic messages that aren't well categorized. static StringEventLog* _messages; diff --git a/src/hotspot/share/utilities/exceptions.cpp b/src/hotspot/share/utilities/exceptions.cpp index b54474ea6d6..4455ab801cb 100644 --- a/src/hotspot/share/utilities/exceptions.cpp +++ b/src/hotspot/share/utilities/exceptions.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,6 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" @@ -203,7 +202,7 @@ void Exceptions::_throw(JavaThread* thread, const char* file, int line, Handle h } if (h_exception->is_a(vmClasses::LinkageError_klass())) { - AtomicAccess::inc(&_linkage_errors, memory_order_relaxed); + _linkage_errors.add_then_fetch(1, memory_order_relaxed); } assert(h_exception->is_a(vmClasses::Throwable_klass()), "exception is not a subclass of java/lang/Throwable"); @@ -268,6 +267,10 @@ void Exceptions::_throw_cause(JavaThread* thread, const char* file, int line, Sy } +void Exceptions::increment_stack_overflow_errors() { + Exceptions::_stack_overflow_errors.add_then_fetch(1, memory_order_relaxed); +} + void Exceptions::throw_stack_overflow_exception(JavaThread* THREAD, const char* file, int line, const methodHandle& method) { Handle exception; if (!THREAD->has_pending_exception()) { @@ -279,7 +282,7 @@ void Exceptions::throw_stack_overflow_exception(JavaThread* THREAD, const char* java_lang_Throwable::fill_in_stack_trace(exception, method); } // Increment counter for hs_err file reporting - AtomicAccess::inc(&Exceptions::_stack_overflow_errors, memory_order_relaxed); + increment_stack_overflow_errors(); } else { // if prior exception, throw that one instead exception = Handle(THREAD, THREAD->pending_exception()); @@ -518,20 +521,20 @@ void Exceptions::wrap_dynamic_exception(bool is_indy, JavaThread* THREAD) { } // Exception counting for hs_err file -volatile int Exceptions::_stack_overflow_errors = 0; -volatile int Exceptions::_linkage_errors = 0; -volatile int Exceptions::_out_of_memory_error_java_heap_errors = 0; -volatile int Exceptions::_out_of_memory_error_metaspace_errors = 0; -volatile int 
Exceptions::_out_of_memory_error_class_metaspace_errors = 0; +Atomic Exceptions::_stack_overflow_errors{0}; +Atomic Exceptions::_linkage_errors{0}; +Atomic Exceptions::_out_of_memory_error_java_heap_errors{0}; +Atomic Exceptions::_out_of_memory_error_metaspace_errors{0}; +Atomic Exceptions::_out_of_memory_error_class_metaspace_errors{0}; void Exceptions::count_out_of_memory_exceptions(Handle exception) { if (Universe::is_out_of_memory_error_metaspace(exception())) { - AtomicAccess::inc(&_out_of_memory_error_metaspace_errors, memory_order_relaxed); + _out_of_memory_error_metaspace_errors.add_then_fetch(1, memory_order_relaxed); } else if (Universe::is_out_of_memory_error_class_metaspace(exception())) { - AtomicAccess::inc(&_out_of_memory_error_class_metaspace_errors, memory_order_relaxed); + _out_of_memory_error_class_metaspace_errors.add_then_fetch(1, memory_order_relaxed); } else { - // everything else reported as java heap OOM - AtomicAccess::inc(&_out_of_memory_error_java_heap_errors, memory_order_relaxed); + // everything else reported as java heap OOM + _out_of_memory_error_java_heap_errors.add_then_fetch(1, memory_order_relaxed); } } @@ -542,19 +545,24 @@ static void print_oom_count(outputStream* st, const char *err, int count) { } bool Exceptions::has_exception_counts() { - return (_stack_overflow_errors + _out_of_memory_error_java_heap_errors + - _out_of_memory_error_metaspace_errors + _out_of_memory_error_class_metaspace_errors) > 0; + return (_stack_overflow_errors.load_relaxed() + + _out_of_memory_error_java_heap_errors.load_relaxed() + + _out_of_memory_error_metaspace_errors.load_relaxed() + + _out_of_memory_error_class_metaspace_errors.load_relaxed()) > 0; } void Exceptions::print_exception_counts_on_error(outputStream* st) { - print_oom_count(st, "java_heap_errors", _out_of_memory_error_java_heap_errors); - print_oom_count(st, "metaspace_errors", _out_of_memory_error_metaspace_errors); - print_oom_count(st, "class_metaspace_errors", 
_out_of_memory_error_class_metaspace_errors); - if (_stack_overflow_errors > 0) { - st->print_cr("StackOverflowErrors=%d", _stack_overflow_errors); + print_oom_count(st, "java_heap_errors", + _out_of_memory_error_java_heap_errors.load_relaxed()); + print_oom_count(st, "metaspace_errors", + _out_of_memory_error_metaspace_errors.load_relaxed()); + print_oom_count(st, "class_metaspace_errors", + _out_of_memory_error_class_metaspace_errors.load_relaxed()); + if (_stack_overflow_errors.load_relaxed() > 0) { + st->print_cr("StackOverflowErrors=%d", _stack_overflow_errors.load_relaxed()); } - if (_linkage_errors > 0) { - st->print_cr("LinkageErrors=%d", _linkage_errors); + if (_linkage_errors.load_relaxed() > 0) { + st->print_cr("LinkageErrors=%d", _linkage_errors.load_relaxed()); } } diff --git a/src/hotspot/share/utilities/exceptions.hpp b/src/hotspot/share/utilities/exceptions.hpp index e76a0041d20..0299dedf2e6 100644 --- a/src/hotspot/share/utilities/exceptions.hpp +++ b/src/hotspot/share/utilities/exceptions.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "oops/oopsHierarchy.hpp" +#include "runtime/atomic.hpp" #include "utilities/ostream.hpp" #include "utilities/sizes.hpp" @@ -113,12 +114,16 @@ class Exceptions { static bool special_exception(JavaThread* thread, const char* file, int line, Handle exception, Symbol* name = nullptr, const char* message = nullptr); // Count out of memory errors that are interesting in error diagnosis - static volatile int _out_of_memory_error_java_heap_errors; - static volatile int _out_of_memory_error_metaspace_errors; - static volatile int _out_of_memory_error_class_metaspace_errors; + static Atomic _out_of_memory_error_java_heap_errors; + static Atomic _out_of_memory_error_metaspace_errors; + static Atomic _out_of_memory_error_class_metaspace_errors; // Count linkage errors - static volatile int _linkage_errors; + static Atomic _linkage_errors; + + // Count stack overflow errors. + static Atomic _stack_overflow_errors; + public: // this enum is defined to indicate whether it is safe to // ignore the encoding scheme of the original message string. @@ -179,10 +184,9 @@ class Exceptions { static void wrap_dynamic_exception(bool is_indy, JavaThread* thread); - // Exception counting for error files of interesting exceptions that may have - // caused a problem for the jvm - static volatile int _stack_overflow_errors; - + // Exception counting of interesting exceptions that may have caused a + // problem for the JVM, for reporting in the hs_err file. 
+ static void increment_stack_overflow_errors(); static bool has_exception_counts(); static void count_out_of_memory_exceptions(Handle exception); static void print_exception_counts_on_error(outputStream* st); diff --git a/src/hotspot/share/utilities/filterQueue.hpp b/src/hotspot/share/utilities/filterQueue.hpp index 141c40f09c8..ea47f07b7b8 100644 --- a/src/hotspot/share/utilities/filterQueue.hpp +++ b/src/hotspot/share/utilities/filterQueue.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_UTILITIES_FILTERQUEUE_HPP #include "memory/allocation.hpp" -#include "runtime/atomicAccess.hpp" +#include "runtime/atomic.hpp" // The FilterQueue is FIFO with the ability to skip over queued items. // The skipping is controlled by using a filter when popping. @@ -42,9 +42,9 @@ class FilterQueue { E _data; }; - Node* _first; + Atomic _first; Node* load_first() { - return AtomicAccess::load_acquire(&_first); + return _first.load_acquire(); } static bool match_all(E d) { return true; } diff --git a/src/hotspot/share/utilities/filterQueue.inline.hpp b/src/hotspot/share/utilities/filterQueue.inline.hpp index 18b40b81c6c..7fa1bc94b7b 100644 --- a/src/hotspot/share/utilities/filterQueue.inline.hpp +++ b/src/hotspot/share/utilities/filterQueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ void FilterQueue::push(E data) { while (true){ head = load_first(); insnode->_next = head; - if (AtomicAccess::cmpxchg(&_first, head, insnode) == head) { + if (_first.compare_set(head, insnode)) { break; } yield.wait(); @@ -91,7 +91,7 @@ E FilterQueue::pop(MATCH_FUNC& match_func) { if (match_prev == nullptr) { // Working on first - if (AtomicAccess::cmpxchg(&_first, match, match->_next) == match) { + if (_first.compare_set(match, match->_next)) { E ret = match->_data; delete match; return ret; diff --git a/src/hotspot/share/utilities/globalCounter.inline.hpp b/src/hotspot/share/utilities/globalCounter.inline.hpp index ed37b8a878d..0d05096716a 100644 --- a/src/hotspot/share/utilities/globalCounter.inline.hpp +++ b/src/hotspot/share/utilities/globalCounter.inline.hpp @@ -29,6 +29,7 @@ #include "runtime/atomic.hpp" #include "runtime/javaThread.hpp" +#include "runtime/safepointVerifiers.hpp" inline GlobalCounter::CSContext GlobalCounter::critical_section_begin(Thread *thread) { @@ -53,11 +54,13 @@ GlobalCounter::critical_section_end(Thread *thread, CSContext context) { } class GlobalCounter::CriticalSection { - private: + NoSafepointVerifier _nsv; Thread* _thread; CSContext _context; - public: + +public: inline CriticalSection(Thread* thread) : + _nsv(), _thread(thread), _context(GlobalCounter::critical_section_begin(_thread)) {} diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp index 9560d863a2c..40691de518e 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -1072,18 +1072,26 @@ const intptr_t NoBits = 0; // no bits set in a word const jlong NoLongBits = 0; // no bits set in a long const intptr_t OneBit = 1; // only right_most bit set in a word -// get a word with the n.th or the right-most or left-most n bits set -// (note: #define used only so 
that they can be used in enum constant definitions) -#define nth_bit(n) (((n) >= BitsPerWord) ? 0 : (OneBit << (n))) -#define right_n_bits(n) (nth_bit(n) - 1) - -// same as nth_bit(n), but allows handing in a type as template parameter. Allows -// us to use nth_bit with 64-bit types on 32-bit platforms -template inline T nth_bit_typed(int n) { - return ((T)1) << n; +// Return a value of type T with the n.th bit set and all other bits zero. +// T must be an integral or enum type. n must be non-negative. If n is at +// least the bitwise size of T then all bits in the result are zero. +template +constexpr T nth_bit(int n) { + assert(n >= 0, "n must be non-negative"); + using U = std::make_unsigned_t; + constexpr size_t size = sizeof(U) * BitsPerByte; + return T((size_t(n) >= size) ? U(0) : (U(1) << n)); } -template inline T right_n_bits_typed(int n) { - return nth_bit_typed(n) - 1; + +// Return a value of type T with all bits below the n.th bit set and all +// other bits zero. T must be an integral or enum type. n must be +// non-negative. If n is at least the bitwise size of T then all bits in +// the result are set. +template +constexpr T right_n_bits(int n) { + assert(n >= 0, "n must be non-negative"); + using U = std::make_unsigned_t; + return T(nth_bit(n) - 1); } // bit-operations using a mask m diff --git a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp index dfd6f2f1880..f106d325c68 100644 --- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp +++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp @@ -69,7 +69,9 @@ inline int strncasecmp(const char *s1, const char *s2, size_t n) { // *not* the same as the C99 Annex K strtok_s. VS provides that function // under the name strtok_s_l. Make strtok_r a synonym so we can use that name // in shared code. 
-const auto strtok_r = strtok_s; +inline char* strtok_r(char* str, const char* delim, char** saveptr) { + return strtok_s(str, delim, saveptr); +} // VS doesn't provide POSIX macros S_ISFIFO or S_IFIFO. It doesn't even // provide _S_ISFIFO, per its usual naming convention for POSIX stuff. But it diff --git a/src/hotspot/share/utilities/growableArray.cpp b/src/hotspot/share/utilities/growableArray.cpp index 6a1cb0b0414..9cc0813a1f6 100644 --- a/src/hotspot/share/utilities/growableArray.cpp +++ b/src/hotspot/share/utilities/growableArray.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,7 @@ * */ +#include "cds/aotMetaspace.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "runtime/javaThread.hpp" @@ -56,7 +57,9 @@ void* GrowableArrayCHeapAllocator::allocate(int max, int element_size, MemTag me } void GrowableArrayCHeapAllocator::deallocate(void* elements) { - FreeHeap(elements); + if (!AOTMetaspace::in_aot_cache(elements)) { + FreeHeap(elements); + } } #ifdef ASSERT diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp index e300bea6993..14b54cfc4ea 100644 --- a/src/hotspot/share/utilities/growableArray.hpp +++ b/src/hotspot/share/utilities/growableArray.hpp @@ -116,12 +116,6 @@ protected: ~GrowableArrayView() {} -protected: - // Used by AOTGrowableArray for MetaspaceClosure support. 
- E** data_addr() { - return &_data; - } - public: bool operator==(const GrowableArrayView& rhs) const { if (_len != rhs._len) @@ -303,6 +297,11 @@ public: } tty->print("}\n"); } + + // MetaspaceClosure support + E** data_addr() { + return &_data; + } }; template @@ -821,6 +820,8 @@ public: this->clear_and_deallocate(); } } + + void assert_on_C_heap() { assert(on_C_heap(), "must be on C heap"); } }; // Leaner GrowableArray for CHeap backed data arrays, with compile-time decided MemTag. diff --git a/src/hotspot/share/utilities/integerCast.hpp b/src/hotspot/share/utilities/integerCast.hpp new file mode 100644 index 00000000000..0715cab18d5 --- /dev/null +++ b/src/hotspot/share/utilities/integerCast.hpp @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_UTILITIES_INTEGERCAST_HPP +#define SHARE_UTILITIES_INTEGERCAST_HPP + +#include "cppstdlib/limits.hpp" +#include "cppstdlib/type_traits.hpp" +#include "metaprogramming/enableIf.hpp" +#include "utilities/debug.hpp" +#include "utilities/macros.hpp" + +#include + +// Tests whether all values for the From type are within the range of values +// for the To Type. From and To must be integral types. This is used by +// integer_cast to test for tautological conversions. +template), + ENABLE_IF(std::is_integral_v)> +constexpr bool is_always_integer_convertible() { + if constexpr (std::is_signed_v == std::is_signed_v) { + // signed => signed or unsigned => unsigned. + return sizeof(To) >= sizeof(From); + } else if constexpr (std::is_signed_v) { + // signed => unsigned is never tautological, because of negative values. + return false; + } else { + // unsigned => signed. + return sizeof(To) > sizeof(From); + } +} + +// Tests whether the value of from is within the range of values for the To +// type. To and From must be integral types. This is used by integer_cast +// to test whether the conversion should be performed. +template), + ENABLE_IF(std::is_integral_v)> +constexpr bool is_integer_convertible(From from) { + if constexpr (is_always_integer_convertible()) { + // This clause simplifies direct calls and the implementation below. It + // isn't needed by integer_cast, where a tautological call is discarded. + return true; + } else if constexpr (std::is_unsigned_v) { + // unsigned => signed or unsigned => unsigned. + // Convert To::max to corresponding unsigned for compare. + using U = std::make_unsigned_t; + return from <= static_cast(std::numeric_limits::max()); + } else if constexpr (std::is_signed_v) { + // signed => signed. + return ((std::numeric_limits::min() <= from) && + (from <= std::numeric_limits::max())); + } else { + // signed => unsigned. Convert from to corresponding unsigned for compare. 
+ using U = std::make_unsigned_t; + return (0 <= from) && (static_cast(from) <= std::numeric_limits::max()); + } +} + +// Convert the from value to the To type, after a debug-only check that the +// value of from is within the range of values for the To type. To and From +// must be integral types. +// +// permit_tautology determines the behavior when a conversion will always +// succeed because the range of values for the From type is enclosed by the +// range of values for the To type (is_always_integer_convertible() +// is true). If true, the conversion will be performed as requested. If +// false, a compile-time error is produced. The default is false for 64bit +// platforms, true for 32bit platforms. See integer_cast_permit_tautology as +// the preferred way to override the default and always provide a true value. +// +// Unnecessary integer_casts make code harder to understand. Hence the +// compile-time failure for tautological conversions, to alert that a code +// change is making a integer_cast unnecessary. This can be suppressed on a +// per-call basis, because there are cases where a conversion might only +// sometimes be tautological. For example, the types involved may vary by +// platform. Another case is if the operation is in a template with dependent +// types, with the operation only being tautological for some instantiations. +// Suppressing the tautology check is an alternative to possibly complex +// metaprogramming to only perform the integer_cast when necessary. +// +// Despite that, for 32bit platforms the default is to not reject unnecessary +// integer_casts. This is because 64bit platforms are the primary target, and +// are likely to require conversions in some places. However, some of those +// conversions will be tautological on 32bit platforms, such as size_t => uint. 
+template), + ENABLE_IF(std::is_integral_v)> +constexpr To integer_cast(From from) { + if constexpr (is_always_integer_convertible()) { + static_assert(permit_tautology, "tautological integer_cast"); + } else { +#ifdef ASSERT + if (!is_integer_convertible(from)) { + if constexpr (std::is_signed_v) { + fatal("integer_cast failed: %jd", static_cast(from)); + } else { + fatal("integer_cast failed: %ju", static_cast(from)); + } + } +#endif // ASSERT + } + return static_cast(from); +} + +// Equivalent to "integer_cast(from)", disabling the compile-time +// check for tautological casts. Using this function is prefered to direct +// use of the permit_tautology template parameter for integer_cast, unless the +// choice is computed. +template), + ENABLE_IF(std::is_integral_v)> +constexpr To integer_cast_permit_tautology(From from) { + return integer_cast(from); +} + +// Convert an enumerator to an integral value via static_cast, after a +// debug-only check that the value is within the range for the destination +// type. This is mostly for compatibility with old code. Class scoped enums +// were used to work around ancient compilers that didn't implement class +// scoped static integral constants properly, and HotSpot code still has many +// examples of this. For others it might be sufficient to provide an explicit +// underlying type and either permit implicit conversions or use +// PrimitiveConversion::cast. 
+template), + ENABLE_IF(std::is_enum_v)> +constexpr To integer_cast(From from) { + using U = std::underlying_type_t; + return integer_cast(static_cast(from)); +} + +#endif // SHARE_UTILITIES_INTEGERCAST_HPP diff --git a/src/hotspot/share/utilities/intn_t.hpp b/src/hotspot/share/utilities/intn_t.hpp index 594e62a1694..1b8e7de652e 100644 --- a/src/hotspot/share/utilities/intn_t.hpp +++ b/src/hotspot/share/utilities/intn_t.hpp @@ -84,6 +84,7 @@ public: constexpr bool operator>(intn_t o) const { return int(*this) > int(o); } constexpr bool operator<=(intn_t o) const { return int(*this) <= int(o); } constexpr bool operator>=(intn_t o) const { return int(*this) >= int(o); } + constexpr intn_t operator>>(unsigned int s) const { return intn_t(int(*this) >> s); } }; template @@ -163,4 +164,35 @@ inline unsigned count_leading_zeros(uintn_t v) { return count_leading_zeros(v._v & uintn_t::_mask) - (32 - nbits); } +class HotSpotNumerics { +private: + template + static constexpr int type_width_impl(T value) { + // Count the number of 1s in `value`. We can't use population_count() from + // utilities/population_count.hpp, since it requires `std::is_integral`, which + // fails for `uintn_t`. Since this is a constexpr function, this function + // does not impose a runtime performance overhead. + return value == T(0) ? 0 : 1 + type_width_impl(value >> 1); + } + +public: + // Returns true if T is a signed type. We can't rely on std::is_signed + // because it returns false for intn_t, which is not a standard integral + // type. Instead, we check whether T(-1) is less than T(0). + template + static constexpr bool is_signed() { + return T(-1) < T(0); + } + + // Returns the bit width of the unsigned type T. We can't use sizeof() on T, + // since sizeof(uintn_t) returns the size of the underlying storage rather + // than the logical type width. So we instead compute the number of 1s in the + // maximum value. 
+ template + static constexpr int type_width() { + static_assert(!is_signed(), "type_width requires an unsigned type"); + return type_width_impl(std::numeric_limits::max()); + } +}; + #endif // SHARE_UTILITIES_INTN_T_HPP diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp index a03255b5cf3..3621f675ecb 100644 --- a/src/hotspot/share/utilities/macros.hpp +++ b/src/hotspot/share/utilities/macros.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -586,6 +586,18 @@ #define BIG_ENDIAN_ONLY(code) code #endif +#ifdef _LP64 +#define INCLUDE_CLASS_SPACE 1 +#define CLASS_SPACE_ONLY(x) x +#define NOT_CLASS_SPACE(x) +#else +// On 32-bit we use fake "narrow class pointers" which are really just 32-bit pointers, +// but we don't use a class space (would cause too much address space fragmentation) +#define INCLUDE_CLASS_SPACE 0 +#define CLASS_SPACE_ONLY(x) +#define NOT_CLASS_SPACE(x) x +#endif + #define define_pd_global(type, name, value) const type pd_##name = value; // Helper macros for constructing file names for includes. 
diff --git a/src/hotspot/share/utilities/ostream.cpp b/src/hotspot/share/utilities/ostream.cpp index 5e339a700cb..ded233d48bf 100644 --- a/src/hotspot/share/utilities/ostream.cpp +++ b/src/hotspot/share/utilities/ostream.cpp @@ -611,15 +611,15 @@ void fileStream::write(const char* s, size_t len) { } } -long fileStream::fileSize() { - long size = -1; +int64_t fileStream::fileSize() { + int64_t size = -1; if (_file != nullptr) { - long pos = ::ftell(_file); + int64_t pos = os::ftell(_file); if (pos < 0) return pos; - if (::fseek(_file, 0, SEEK_END) == 0) { - size = ::ftell(_file); + if (os::fseek(_file, 0, SEEK_END) == 0) { + size = os::ftell(_file); } - ::fseek(_file, pos, SEEK_SET); + os::fseek(_file, pos, SEEK_SET); } return size; } diff --git a/src/hotspot/share/utilities/ostream.hpp b/src/hotspot/share/utilities/ostream.hpp index e971ac4d125..c3a4026f4d9 100644 --- a/src/hotspot/share/utilities/ostream.hpp +++ b/src/hotspot/share/utilities/ostream.hpp @@ -312,7 +312,7 @@ class fileStream : public outputStream { fclose(_file); _need_close = false; } - long fileSize(); + int64_t fileSize(); void flush(); }; diff --git a/src/hotspot/share/utilities/rbTree.hpp b/src/hotspot/share/utilities/rbTree.hpp index c522d787466..9c04ccbe9ab 100644 --- a/src/hotspot/share/utilities/rbTree.hpp +++ b/src/hotspot/share/utilities/rbTree.hpp @@ -429,7 +429,12 @@ public: void free(void* ptr); }; - +template +RBTreeOrdering rbtree_primitive_cmp(T a, T b) { // handy function + if (a < b) return RBTreeOrdering::LT; + if (a > b) return RBTreeOrdering::GT; + return RBTreeOrdering::EQ; +} template using RBTreeCHeap = RBTree>; diff --git a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp index 450c7e89233..c21c9d4ee5e 100644 --- a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp +++ b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its 
affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "runtime/atomic.hpp" +#include "runtime/safepointVerifiers.hpp" #include "runtime/semaphore.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -101,12 +102,14 @@ inline void SingleWriterSynchronizer::exit(uint enter_value) { } class SingleWriterSynchronizer::CriticalSection : public StackObj { + NoSafepointVerifier _nsv; SingleWriterSynchronizer* _synchronizer; uint _enter_value; public: // Enter synchronizer's critical section. explicit CriticalSection(SingleWriterSynchronizer* synchronizer) : + _nsv(), _synchronizer(synchronizer), _enter_value(synchronizer->enter()) {} diff --git a/src/hotspot/share/utilities/stringUtils.hpp b/src/hotspot/share/utilities/stringUtils.hpp index c3d21233808..66c8d30c7c0 100644 --- a/src/hotspot/share/utilities/stringUtils.hpp +++ b/src/hotspot/share/utilities/stringUtils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,11 +26,7 @@ #define SHARE_UTILITIES_STRINGUTILS_HPP #include "memory/allStatic.hpp" - -#ifdef _WINDOWS - // strtok_s is the Windows thread-safe equivalent of POSIX strtok_r -# define strtok_r strtok_s -#endif +#include "utilities/globalDefinitions.hpp" class StringUtils : AllStatic { public: diff --git a/src/hotspot/share/utilities/tableStatistics.cpp b/src/hotspot/share/utilities/tableStatistics.cpp index 331652becd5..34d0969969a 100644 --- a/src/hotspot/share/utilities/tableStatistics.cpp +++ b/src/hotspot/share/utilities/tableStatistics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +22,6 @@ * */ -#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" @@ -42,7 +41,7 @@ TableRateStatistics::~TableRateStatistics() { }; void TableRateStatistics::add() { #if INCLUDE_JFR if (Jfr::is_recording()) { - AtomicAccess::inc(&_added_items); + _added_items.add_then_fetch(1u); } #endif } @@ -50,7 +49,7 @@ void TableRateStatistics::add() { void TableRateStatistics::remove() { #if INCLUDE_JFR if (Jfr::is_recording()) { - AtomicAccess::inc(&_removed_items); + _removed_items.add_then_fetch(1u); } #endif } @@ -61,8 +60,8 @@ void TableRateStatistics::stamp() { _added_items_stamp_prev = _added_items_stamp; _removed_items_stamp_prev = _removed_items_stamp; - _added_items_stamp = _added_items; - _removed_items_stamp = _removed_items; + _added_items_stamp = _added_items.load_relaxed(); + _removed_items_stamp = _removed_items.load_relaxed(); if (_time_stamp == 0) { _time_stamp = now - 1000000000; diff --git a/src/hotspot/share/utilities/tableStatistics.hpp 
b/src/hotspot/share/utilities/tableStatistics.hpp index d4fd3302922..95856114833 100644 --- a/src/hotspot/share/utilities/tableStatistics.hpp +++ b/src/hotspot/share/utilities/tableStatistics.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #define SHARE_UTILITIES_TABLE_STATISTICS_HPP #include "memory/allocation.hpp" +#include "runtime/atomic.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/numberSeq.hpp" @@ -35,8 +36,8 @@ class TableRateStatistics : public CHeapObj { friend class TableStatistics; private: - volatile size_t _added_items; - volatile size_t _removed_items; + Atomic _added_items; + Atomic _removed_items; jlong _time_stamp; double _seconds_stamp; diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp index 6088eafbda4..1cecdc0cb33 100644 --- a/src/hotspot/share/utilities/vmError.cpp +++ b/src/hotspot/share/utilities/vmError.cpp @@ -43,7 +43,6 @@ #include "oops/compressedOops.hpp" #include "prims/whitebox.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomicAccess.hpp" #include "runtime/flags/jvmFlag.hpp" #include "runtime/frame.inline.hpp" #include "runtime/init.hpp" @@ -86,12 +85,12 @@ bool VMError::coredump_status; char VMError::coredump_message[O_BUFLEN]; int VMError::_current_step; const char* VMError::_current_step_info; -volatile jlong VMError::_reporting_start_time = -1; -volatile bool VMError::_reporting_did_timeout = false; -volatile jlong VMError::_step_start_time = -1; -volatile bool VMError::_step_did_timeout = false; -volatile bool VMError::_step_did_succeed = false; -volatile intptr_t VMError::_first_error_tid = -1; +Atomic 
VMError::_reporting_start_time{-1}; +Atomic VMError::_reporting_did_timeout{false}; +Atomic VMError::_step_start_time{-1}; +Atomic VMError::_step_did_timeout{false}; +Atomic VMError::_step_did_succeed{false}; +Atomic VMError::_first_error_tid{-1}; int VMError::_id; const char* VMError::_message; char VMError::_detail_msg[1024]; @@ -105,8 +104,8 @@ int VMError::_lineno; size_t VMError::_size; const size_t VMError::_reattempt_required_stack_headroom = 64 * K; const intptr_t VMError::segfault_address = pd_segfault_address; -Thread* volatile VMError::_handshake_timed_out_thread = nullptr; -Thread* volatile VMError::_safepoint_timed_out_thread = nullptr; +Atomic VMError::_handshake_timed_out_thread{}; +Atomic VMError::_safepoint_timed_out_thread{}; // List of environment variables that should be reported in error log file. static const char* env_list[] = { @@ -248,7 +247,7 @@ bool VMError::can_reattempt_step(const char* &stop_reason) { return false; } - if (_step_did_timeout) { + if (_step_did_timeout.load_relaxed()) { stop_reason = "Step time limit reached"; return false; } @@ -534,8 +533,7 @@ static void report_vm_version(outputStream* st, char* buf, int buflen) { "", "", #endif UseCompressedOops ? ", compressed oops" : "", - UseCompactObjectHeaders ? ", compact obj headers" - : (UseCompressedClassPointers ? ", compressed class ptrs" : ""), + UseCompactObjectHeaders ? ", compact obj headers" : "", GCConfig::hs_err_name(), VM_Version::vm_platform_string() ); @@ -543,12 +541,12 @@ static void report_vm_version(outputStream* st, char* buf, int buflen) { // Returns true if at least one thread reported a fatal error and fatal error handling is in process. bool VMError::is_error_reported() { - return _first_error_tid != -1; + return _first_error_tid.load_relaxed() != -1; } // Returns true if the current thread reported a fatal error. 
bool VMError::is_error_reported_in_current_thread() { - return _first_error_tid == os::current_thread_id(); + return _first_error_tid.load_relaxed() == os::current_thread_id(); } // Helper, return current timestamp for timeout handling. @@ -560,24 +558,24 @@ jlong VMError::get_current_timestamp() { void VMError::record_reporting_start_time() { const jlong now = get_current_timestamp(); - AtomicAccess::store(&_reporting_start_time, now); + _reporting_start_time.store_relaxed(now); } jlong VMError::get_reporting_start_time() { - return AtomicAccess::load(&_reporting_start_time); + return _reporting_start_time.load_relaxed(); } void VMError::record_step_start_time() { const jlong now = get_current_timestamp(); - AtomicAccess::store(&_step_start_time, now); + _step_start_time.store_relaxed(now); } jlong VMError::get_step_start_time() { - return AtomicAccess::load(&_step_start_time); + return _step_start_time.load_relaxed(); } void VMError::clear_step_start_time() { - return AtomicAccess::store(&_step_start_time, (jlong)0); + return _step_start_time.store_relaxed(0); } // This is the main function to report a fatal error. 
Only one thread can @@ -612,31 +610,31 @@ void VMError::report(outputStream* st, bool _verbose) { const char* stop_reattempt_reason = nullptr; # define BEGIN \ if (_current_step == 0) { \ - _step_did_succeed = false; \ + _step_did_succeed.store_relaxed(false); \ _current_step = __LINE__; \ { // [Begin logic] # define STEP_IF(s, cond) \ } \ - _step_did_succeed = true; \ + _step_did_succeed.store_relaxed(true); \ } \ if (_current_step < __LINE__) { \ - _step_did_succeed = false; \ + _step_did_succeed.store_relaxed(false); \ _current_step = __LINE__; \ _current_step_info = s; \ if ((cond)) { \ record_step_start_time(); \ - _step_did_timeout = false; + _step_did_timeout.store_relaxed(false); // [Step logic] # define STEP(s) STEP_IF(s, true) # define REATTEMPT_STEP_IF(s, cond) \ } \ - _step_did_succeed = true; \ + _step_did_succeed.store_relaxed(true); \ } \ - if (_current_step < __LINE__ && !_step_did_succeed) { \ + if (_current_step < __LINE__ && !_step_did_succeed.load_relaxed()) { \ _current_step = __LINE__; \ _current_step_info = s; \ const bool cond_value = (cond); \ @@ -650,7 +648,7 @@ void VMError::report(outputStream* st, bool _verbose) { # define END \ } \ - _step_did_succeed = true; \ + _step_did_succeed.store_relaxed(true); \ clear_step_start_time(); \ } @@ -1216,7 +1214,7 @@ void VMError::report(outputStream* st, bool _verbose) { CompressedOops::print_mode(st); st->cr(); - STEP_IF("printing compressed klass pointers mode", _verbose && UseCompressedClassPointers) + STEP_IF("printing compressed klass pointers mode", _verbose) CDS_ONLY(AOTMetaspace::print_on(st);) Metaspace::print_compressed_class_space(st); CompressedKlassPointers::print_mode(st); @@ -1329,7 +1327,14 @@ void VMError::report(outputStream* st, bool _verbose) { STEP_IF("printing OS information", _verbose) os::print_os_info(st); +#ifdef __APPLE__ + // Avoid large stack allocation on Mac for FD count during signal-handling. 
+ os::Bsd::print_open_file_descriptors(st, buf, sizeof(buf)); st->cr(); +#else + os::print_open_file_descriptors(st); + st->cr(); +#endif STEP_IF("printing CPU info", _verbose) os::print_cpu_info(st, buf, sizeof(buf)); @@ -1359,21 +1364,21 @@ void VMError::report(outputStream* st, bool _verbose) { void VMError::set_handshake_timed_out_thread(Thread* thread) { // Only preserve the first thread to time-out this way. The atomic operation ensures // visibility to the target thread. - AtomicAccess::replace_if_null(&_handshake_timed_out_thread, thread); + _handshake_timed_out_thread.compare_exchange(nullptr, thread); } void VMError::set_safepoint_timed_out_thread(Thread* thread) { // Only preserve the first thread to time-out this way. The atomic operation ensures // visibility to the target thread. - AtomicAccess::replace_if_null(&_safepoint_timed_out_thread, thread); + _safepoint_timed_out_thread.compare_exchange(nullptr, thread); } Thread* VMError::get_handshake_timed_out_thread() { - return AtomicAccess::load(&_handshake_timed_out_thread); + return _handshake_timed_out_thread.load_relaxed(); } Thread* VMError::get_safepoint_timed_out_thread() { - return AtomicAccess::load(&_safepoint_timed_out_thread); + return _safepoint_timed_out_thread.load_relaxed(); } // Report for the vm_info_cmd. This prints out the information above omitting @@ -1431,12 +1436,10 @@ void VMError::print_vm_info(outputStream* st) { #endif // STEP("printing compressed class ptrs mode") - if (UseCompressedClassPointers) { - CDS_ONLY(AOTMetaspace::print_on(st);) - Metaspace::print_compressed_class_space(st); - CompressedKlassPointers::print_mode(st); - st->cr(); - } + CDS_ONLY(AOTMetaspace::print_on(st);) + Metaspace::print_compressed_class_space(st); + CompressedKlassPointers::print_mode(st); + st->cr(); // Take heap lock over heap, GC and metaspace printing so that information // is consistent. 
@@ -1550,6 +1553,7 @@ void VMError::print_vm_info(outputStream* st) { // STEP("printing OS information") os::print_os_info(st); + os::print_open_file_descriptors(st); st->cr(); // STEP("printing CPU info") @@ -1708,8 +1712,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt static bool log_done = false; // done saving error log intptr_t mytid = os::current_thread_id(); - if (_first_error_tid == -1 && - AtomicAccess::cmpxchg(&_first_error_tid, (intptr_t)-1, mytid) == -1) { + if (_first_error_tid.compare_set(-1, mytid)) { if (SuppressFatalErrorMessage) { os::abort(CreateCoredumpOnCrash); @@ -1756,7 +1759,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt } else { // This is not the first error, see if it happened in a different thread // or in the same thread during error reporting. - if (_first_error_tid != mytid) { + if (_first_error_tid.load_relaxed() != mytid) { if (!SuppressFatalErrorMessage) { char msgbuf[64]; jio_snprintf(msgbuf, sizeof(msgbuf), @@ -1788,19 +1791,19 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt st->cr(); // Timeout handling. - if (_step_did_timeout) { + if (_step_did_timeout.load_relaxed()) { // The current step had a timeout. Lets continue reporting with the next step. st->print_raw("[timeout occurred during error reporting in step \""); st->print_raw(_current_step_info); st->print_cr("\"] after " INT64_FORMAT " s.", (int64_t) - ((get_current_timestamp() - _step_start_time) / TIMESTAMP_TO_SECONDS_FACTOR)); - } else if (_reporting_did_timeout) { + ((get_current_timestamp() - get_step_start_time()) / TIMESTAMP_TO_SECONDS_FACTOR)); + } else if (_reporting_did_timeout.load_relaxed()) { // We hit ErrorLogTimeout. Reporting will stop altogether. Let's wrap things // up, the process is about to be stopped by the WatcherThread. st->print_cr("------ Timeout during error reporting after " INT64_FORMAT " s. 
------", (int64_t) - ((get_current_timestamp() - _reporting_start_time) / TIMESTAMP_TO_SECONDS_FACTOR)); + ((get_current_timestamp() - get_reporting_start_time()) / TIMESTAMP_TO_SECONDS_FACTOR)); st->flush(); // Watcherthread is about to call os::die. Lets just wait. os::infinite_sleep(); @@ -2100,10 +2103,10 @@ bool VMError::check_timeout() { // Timestamp is stored in nanos. if (reporting_start_time > 0) { const jlong end = reporting_start_time + (jlong)ErrorLogTimeout * TIMESTAMP_TO_SECONDS_FACTOR; - if (end <= now && !_reporting_did_timeout) { + if (end <= now && !_reporting_did_timeout.load_relaxed()) { // We hit ErrorLogTimeout and we haven't interrupted the reporting // thread yet. - _reporting_did_timeout = true; + _reporting_did_timeout.store_relaxed(true); interrupt_reporting_thread(); return true; // global timeout } @@ -2119,10 +2122,10 @@ bool VMError::check_timeout() { const int max_step_timeout_secs = 5; const jlong timeout_duration = MAX2((jlong)max_step_timeout_secs, (jlong)ErrorLogTimeout * TIMESTAMP_TO_SECONDS_FACTOR / 4); const jlong end = step_start_time + timeout_duration; - if (end <= now && !_step_did_timeout) { + if (end <= now && !_step_did_timeout.load_relaxed()) { // The step timed out and we haven't interrupted the reporting // thread yet. - _step_did_timeout = true; + _step_did_timeout.store_relaxed(true); interrupt_reporting_thread(); return false; // (Not a global timeout) } diff --git a/src/hotspot/share/utilities/vmError.hpp b/src/hotspot/share/utilities/vmError.hpp index 04cea6de47c..b46ba208788 100644 --- a/src/hotspot/share/utilities/vmError.hpp +++ b/src/hotspot/share/utilities/vmError.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -27,6 +27,7 @@ #define SHARE_UTILITIES_VMERROR_HPP #include "memory/allStatic.hpp" +#include "runtime/atomic.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" @@ -73,7 +74,7 @@ class VMError : public AllStatic { // Thread id of the first error. We must be able to handle native thread, // so use thread id instead of Thread* to identify thread. - static volatile intptr_t _first_error_tid; + static Atomic _first_error_tid; // Core dump status, false if we have been unable to write a core/minidump for some reason static bool coredump_status; @@ -85,16 +86,16 @@ class VMError : public AllStatic { // Timeout handling: // Timestamp at which error reporting started; -1 if no error reporting in progress. - static volatile jlong _reporting_start_time; + static Atomic _reporting_start_time; // Whether or not error reporting did timeout. - static volatile bool _reporting_did_timeout; + static Atomic _reporting_did_timeout; // Timestamp at which the last error reporting step started; -1 if no error reporting // in progress. - static volatile jlong _step_start_time; + static Atomic _step_start_time; // Whether or not the last error reporting step did timeout. - static volatile bool _step_did_timeout; + static Atomic _step_did_timeout; // Whether or not the last error reporting step did succeed. 
- static volatile bool _step_did_succeed; + static Atomic _step_did_succeed; // Install secondary signal handler to handle secondary faults during error reporting // (see VMError::crash_handler) @@ -143,8 +144,8 @@ class VMError : public AllStatic { static void clear_step_start_time(); // Handshake/safepoint timed out threads - static Thread* volatile _handshake_timed_out_thread; - static Thread* volatile _safepoint_timed_out_thread; + static Atomic _handshake_timed_out_thread; + static Atomic _safepoint_timed_out_thread; WINDOWS_ONLY([[noreturn]] static void raise_fail_fast(const void* exrecord, const void* context);) diff --git a/src/hotspot/share/utilities/zipLibrary.cpp b/src/hotspot/share/utilities/zipLibrary.cpp index 54875516a0f..ad6bb82d43f 100644 --- a/src/hotspot/share/utilities/zipLibrary.cpp +++ b/src/hotspot/share/utilities/zipLibrary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "jvm_io.h" #include "runtime/arguments.hpp" +#include "runtime/atomic.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/os.inline.hpp" #include "runtime/semaphore.inline.hpp" @@ -50,10 +51,10 @@ static ZIP_GZip_InitParams_t ZIP_GZip_InitParams = nullptr; static ZIP_GZip_Fully_t ZIP_GZip_Fully = nullptr; static void* _zip_handle = nullptr; -static bool _loaded = false; +static Atomic _loaded{false}; static inline bool is_loaded() { - return AtomicAccess::load_acquire(&_loaded); + return _loaded.load_acquire(); } static inline bool not_loaded() { @@ -111,7 +112,7 @@ static void load_zip_library(bool vm_exit_on_failure) { } store_function_pointers(&path[0], vm_exit_on_failure); - AtomicAccess::release_store(&_loaded, true); + _loaded.release_store(true); assert(is_loaded(), "invariant"); } diff --git a/src/java.desktop/aix/native/libawt/porting_aix.h b/src/java.base/aix/native/include/dl_info.h similarity index 75% rename from src/java.desktop/aix/native/libawt/porting_aix.h rename to src/java.base/aix/native/include/dl_info.h index 719bbaf224e..0ea10149300 100644 --- a/src/java.desktop/aix/native/libawt/porting_aix.h +++ b/src/java.base/aix/native/include/dl_info.h @@ -1,5 +1,6 @@ /* - * Copyright (c) 2012, 2018 SAP SE. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,24 +22,15 @@ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. - * */ -/* - * Header file to contain porting-relevant code which does not have a - * home anywhere else. 
- * This is initially based on hotspot/src/os/aix/vm/{loadlib,porting}_aix.{hpp,cpp} - */ +#ifndef AIX_INCLUDE_DL_INFO_H +#define AIX_INCLUDE_DL_INFO_H -/* - * Aix' own version of dladdr(). - * This function tries to mimic dladdr(3) on Linux - * (see http://linux.die.net/man/3/dladdr) - * dladdr(3) is not POSIX but a GNU extension, and is not available on AIX. - * +/* struct for dladdr * Differences between AIX dladdr and Linux dladdr: * - * 1) Dl_info.dli_fbase: can never work, is disabled. + * 1) Dl_info.dli_fbase: can never work, is not included in our struct * A loaded image on AIX is divided in multiple segments, at least two * (text and data) but potentially also far more. This is because the loader may * load each member into an own segment, as for instance happens with the libC.a @@ -50,12 +42,8 @@ typedef struct { const char *dli_fname; /* file path of loaded library */ - void *dli_fbase; /* doesn't make sense on AIX */ const char *dli_sname; /* symbol name; "" if not known */ void *dli_saddr; /* address of *entry* of function; not function descriptor; */ } Dl_info; -#ifdef __cplusplus -extern "C" #endif -int dladdr(void *addr, Dl_info *info); diff --git a/src/java.base/aix/native/libjli/java_md_aix.h b/src/java.base/aix/native/libjli/java_md_aix.h index d319a1d6353..d63030fc65e 100644 --- a/src/java.base/aix/native/libjli/java_md_aix.h +++ b/src/java.base/aix/native/libjli/java_md_aix.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018 SAP SE. All rights reserved. + * Copyright (c) 2016, 2026 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,12 +37,7 @@ * in the hotspot implementation which is not available at this place, though. 
*/ -typedef struct { - const char *dli_fname; /* file path of loaded library */ - void *dli_fbase; /* unsupported */ - const char *dli_sname; /* unsupported */ - void *dli_saddr; /* unsupported */ -} Dl_info; +#include "dl_info.h" int dladdr(void *addr, Dl_info *info); diff --git a/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java b/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java index 44e957f54fb..ec3e135b8b1 100644 --- a/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java +++ b/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -139,10 +139,7 @@ class LinuxFileSystem extends UnixFileSystem { int size, long addressToPollForCancel) throws UnixException { - int advice = POSIX_FADV_SEQUENTIAL | // sequential data access - POSIX_FADV_NOREUSE | // will access only once - POSIX_FADV_WILLNEED; // will access in near future - posix_fadvise(src, 0, 0, advice); + posix_fadvise(src, 0, 0, POSIX_FADV_SEQUENTIAL); super.bufferedCopy(dst, src, address, size, addressToPollForCancel); } @@ -151,10 +148,7 @@ class LinuxFileSystem extends UnixFileSystem { int directCopy(int dst, int src, long addressToPollForCancel) throws UnixException { - int advice = POSIX_FADV_SEQUENTIAL | // sequential data access - POSIX_FADV_NOREUSE | // will access only once - POSIX_FADV_WILLNEED; // will access in near future - posix_fadvise(src, 0, 0, advice); + posix_fadvise(src, 0, 0, POSIX_FADV_SEQUENTIAL); return directCopy0(dst, src, addressToPollForCancel); } diff --git a/src/java.base/share/classes/java/io/ObjectInputStream.java b/src/java.base/share/classes/java/io/ObjectInputStream.java index daed5f3cce5..cd6bd7683f7 100644 --- 
a/src/java.base/share/classes/java/io/ObjectInputStream.java +++ b/src/java.base/share/classes/java/io/ObjectInputStream.java @@ -2219,7 +2219,7 @@ public class ObjectInputStream * mechanism marks the record as having an exception. * Null is returned from readRecord and later the exception is thrown at * the exit of {@link #readObject(Class)}. - **/ + */ private Object readRecord(ObjectStreamClass desc) throws IOException { ObjectStreamClass.ClassDataSlot[] slots = desc.getClassDataLayout(); if (slots.length != 1) { diff --git a/src/java.base/share/classes/java/lang/Boolean.java b/src/java.base/share/classes/java/lang/Boolean.java index 4c24e98a549..49ab80edfea 100644 --- a/src/java.base/share/classes/java/lang/Boolean.java +++ b/src/java.base/share/classes/java/lang/Boolean.java @@ -28,14 +28,10 @@ package java.lang; import jdk.internal.vm.annotation.IntrinsicCandidate; import java.lang.constant.Constable; -import java.lang.constant.ConstantDesc; import java.lang.constant.ConstantDescs; import java.lang.constant.DynamicConstantDesc; import java.util.Optional; -import static java.lang.constant.ConstantDescs.BSM_GET_STATIC_FINAL; -import static java.lang.constant.ConstantDescs.CD_Boolean; - /** * The {@code Boolean} class is the {@linkplain * java.lang##wrapperClass wrapper class} for values of the primitive diff --git a/src/java.base/share/classes/java/lang/Byte.java b/src/java.base/share/classes/java/lang/Byte.java index 0f3f7f40d05..c2c03e7a3c2 100644 --- a/src/java.base/share/classes/java/lang/Byte.java +++ b/src/java.base/share/classes/java/lang/Byte.java @@ -36,7 +36,6 @@ import java.util.Optional; import static java.lang.constant.ConstantDescs.BSM_EXPLICIT_CAST; import static java.lang.constant.ConstantDescs.CD_byte; -import static java.lang.constant.ConstantDescs.CD_int; import static java.lang.constant.ConstantDescs.DEFAULT_NAME; /** diff --git a/src/java.base/share/classes/java/lang/Character.java b/src/java.base/share/classes/java/lang/Character.java 
index 33284d86e2d..158c3126ab3 100644 --- a/src/java.base/share/classes/java/lang/Character.java +++ b/src/java.base/share/classes/java/lang/Character.java @@ -67,9 +67,13 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME; * Character information is based on the Unicode Standard, version 17.0. *

* The Java platform has supported different versions of the Unicode - * Standard over time. Upgrades to newer versions of the Unicode Standard - * occurred in the following Java releases, each indicating the new version: + * Standard over time. The following tables list the version of Unicode used + * in each Java release. Unless otherwise specified, all update releases in a + * given Java release family use the same Unicode version. * + * * * * @@ -78,26 +82,56 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME; * * * + * + * + * + * + * + * + * + * + * + * + * + *
Shows Java releases and supported Unicode versions
Java release
Java SE 26Unicode 17.0
Java SE 25Unicode 16.0
Java SE 21Unicode 15.0
Java SE 17Unicode 13.0
Java SE 11Unicode 10.0
Java SE 8Unicode 6.2
+ *

+ * Show other Java releases + *

Java releases prior to Java SE 8 are listed only if they upgraded the + * Unicode version

+ * + * + * + * + * + * + * * * + * + * * * * * * * + * + * + * + * * * + * + * * * * * - * - * + * + * * * - * - * * * * @@ -110,6 +144,8 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME; * * *
Shows other Java releases and supported Unicode + * versions
Java releaseUnicode version
Java SE 24Unicode 16.0
Java SE 23Unicode 15.1
Java SE 22Unicode 15.1
Java SE 20Unicode 15.0
Java SE 19Unicode 14.0
Java SE 18Unicode 13.0
Java SE 16Unicode 13.0
Java SE 15Unicode 13.0
Java SE 14Unicode 12.1
Java SE 13Unicode 12.1
Java SE 12Unicode 11.0
Java SE 11Unicode 10.0
Java SE 10Unicode 8.0
Java SE 9Unicode 8.0
Java SE 8Unicode 6.2
Java SE 7Unicode 6.0
Java SE 5.0Unicode 1.1.5
+ *
+ *

* Variations from these base Unicode versions, such as recognized appendixes, * are documented elsewhere. *

Unicode Character Representations

diff --git a/src/java.base/share/classes/java/lang/Class.java b/src/java.base/share/classes/java/lang/Class.java index eab1993a2b4..f15291827d5 100644 --- a/src/java.base/share/classes/java/lang/Class.java +++ b/src/java.base/share/classes/java/lang/Class.java @@ -224,9 +224,9 @@ public final class Class implements java.io.Serializable, AnnotatedElement, TypeDescriptor.OfField>, Constable { - private static final int ANNOTATION= 0x00002000; - private static final int ENUM = 0x00004000; - private static final int SYNTHETIC = 0x00001000; + private static final int ANNOTATION = 0x00002000; + private static final int ENUM = 0x00004000; + private static final int SYNTHETIC = 0x00001000; private static native void registerNatives(); static { @@ -1390,6 +1390,7 @@ public final class Class implements java.io.Serializable, // INNER_CLASS forbids. INNER_CLASS allows PRIVATE, PROTECTED, // and STATIC, which are not allowed on Location.CLASS. // Use getClassFileAccessFlags to expose SUPER status. + // Arrays need to use PRIVATE/PROTECTED from its component modifiers. var location = (isMemberClass() || isLocalClass() || isAnonymousClass() || isArray()) ? 
AccessFlag.Location.INNER_CLASS : @@ -3779,7 +3780,7 @@ public final class Class implements java.io.Serializable, * @since 1.8 */ public AnnotatedType[] getAnnotatedInterfaces() { - return TypeAnnotationParser.buildAnnotatedInterfaces(getRawTypeAnnotations(), getConstantPool(), this); + return TypeAnnotationParser.buildAnnotatedInterfaces(getRawTypeAnnotations(), getConstantPool(), this); } private native Class getNestHost0(); @@ -3840,7 +3841,7 @@ public final class Class implements java.io.Serializable, return false; } - return getNestHost() == c.getNestHost(); + return Reflection.areNestMates(this, c); } private native Class[] getNestMembers0(); diff --git a/src/java.base/share/classes/java/lang/Short.java b/src/java.base/share/classes/java/lang/Short.java index 920500a7fa3..14f7a267165 100644 --- a/src/java.base/share/classes/java/lang/Short.java +++ b/src/java.base/share/classes/java/lang/Short.java @@ -35,7 +35,6 @@ import java.lang.constant.DynamicConstantDesc; import java.util.Optional; import static java.lang.constant.ConstantDescs.BSM_EXPLICIT_CAST; -import static java.lang.constant.ConstantDescs.CD_int; import static java.lang.constant.ConstantDescs.CD_short; import static java.lang.constant.ConstantDescs.DEFAULT_NAME; diff --git a/src/java.base/share/classes/java/lang/String.java b/src/java.base/share/classes/java/lang/String.java index fc05febdb45..760f3ebc255 100644 --- a/src/java.base/share/classes/java/lang/String.java +++ b/src/java.base/share/classes/java/lang/String.java @@ -70,9 +70,9 @@ import sun.nio.cs.UTF_8; * string literals in Java programs, such as {@code "abc"}, are * implemented as instances of this class. *

- * Strings are constant; their values cannot be changed after they - * are created. String buffers support mutable strings. - * Because String objects are immutable they can be shared. For example: + * Strings are immutable; their values cannot be changed after they + * are created. Because String objects are immutable they can be shared. + * For example: *

  *     String str = "abc";
  * 

@@ -1133,6 +1133,34 @@ public final class String return Arrays.copyOf(dst, dp); } + // This follows the implementation of encodeASCII and encode8859_1 + private static int encodedLengthASCIIor8859_1(byte coder, byte[] val) { + if (coder == LATIN1) { + return val.length; + } + int len = val.length >> 1; + int dp = 0; + int sp = 0; + int sl = len; + while (sp < sl) { + char c = StringUTF16.getChar(val, sp); + if (c >= Character.MIN_HIGH_SURROGATE) { + break; + } + dp++; + sp++; + } + while (sp < sl) { + char c = StringUTF16.getChar(val, sp++); + if (Character.isHighSurrogate(c) && sp < sl && + Character.isLowSurrogate(StringUTF16.getChar(val, sp))) { + sp++; + } + dp++; + } + return dp; + } + //------------------------------ utf8 ------------------------------------ /** @@ -1467,6 +1495,27 @@ public final class String return Arrays.copyOf(dst, dp); } + // This follows the implementation of encodeUTF8 + private static int encodedLengthUTF8(byte coder, byte[] val) { + if (coder == UTF16) { + return encodedLengthUTF8_UTF16(val, null); + } + int positives = StringCoding.countPositives(val, 0, val.length); + if (positives == val.length) { + return positives; + } + int dp = positives; + for (int i = dp; i < val.length; i++) { + byte c = val[i]; + if (c < 0) { + dp += 2; + } else { + dp++; + } + } + return dp; + } + /** * {@return the byte array obtained by first decoding {@code val} with * UTF-16, and then encoding the result with UTF-8} @@ -1484,11 +1533,8 @@ public final class String int sl = val.length >> 1; // UTF-8 encoded can be as much as 3 times the string length // For very large estimate, (as in overflow of 32 bit int), precompute the exact size - long allocLen = (sl * 3 < 0) ? computeSizeUTF8_UTF16(val, exClass) : sl * 3; - if (allocLen > (long)Integer.MAX_VALUE) { - throw new OutOfMemoryError("Required length exceeds implementation limit"); - } - byte[] dst = new byte[(int) allocLen]; + int allocLen = (sl * 3 < 0) ? 
encodedLengthUTF8_UTF16(val, exClass) : sl * 3; + byte[] dst = new byte[allocLen]; while (sp < sl) { // ascii fast loop; char c = StringUTF16.getChar(val, sp); @@ -1547,11 +1593,20 @@ public final class String * @param The exception type parameter to enable callers to avoid * having to declare the exception */ - private static long computeSizeUTF8_UTF16(byte[] val, Class exClass) throws E { + private static int encodedLengthUTF8_UTF16(byte[] val, Class exClass) throws E { long dp = 0L; int sp = 0; int sl = val.length >> 1; + while (sp < sl) { + // ascii fast loop; + char c = StringUTF16.getChar(val, sp); + if (c >= '\u0080') { + break; + } + dp++; + sp++; + } while (sp < sl) { char c = StringUTF16.getChar(val, sp++); if (c < 0x80) { @@ -1580,7 +1635,10 @@ public final class String dp += 3; } } - return dp; + if (dp > (long)Integer.MAX_VALUE) { + throw new OutOfMemoryError("Required length exceeds implementation limit"); + } + return (int) dp; } /** @@ -2045,6 +2103,29 @@ public final class String return encode(Charset.defaultCharset(), coder(), value); } + /** + * {@return the length in bytes of this {@code String} encoded with the given {@link Charset}} + * + *

The returned length accounts for the replacement of malformed-input and unmappable-character + * sequences with the charset's default replacement byte array. The result will be the same value + * as {@link #getBytes(Charset) getBytes(cs).length}. + * + * @apiNote This method provides equivalent or better performance than {@link #getBytes(Charset) + * getBytes(cs).length}. + * + * @param cs The {@link Charset} used to the compute the length + * @since 27 + */ + public int encodedLength(Charset cs) { + Objects.requireNonNull(cs); + if (cs == UTF_8.INSTANCE) { + return encodedLengthUTF8(coder, value); + } else if (cs == ISO_8859_1.INSTANCE || cs == US_ASCII.INSTANCE) { + return encodedLengthASCIIor8859_1(coder, value); + } + return getBytes(cs).length; + } + boolean bytesCompatible(Charset charset, int srcIndex, int numChars) { if (isLatin1()) { if (charset == ISO_8859_1.INSTANCE) { diff --git a/src/java.base/share/classes/java/lang/StringUTF16.java b/src/java.base/share/classes/java/lang/StringUTF16.java index 27b9ae54a8a..23de31a61b7 100644 --- a/src/java.base/share/classes/java/lang/StringUTF16.java +++ b/src/java.base/share/classes/java/lang/StringUTF16.java @@ -67,30 +67,61 @@ final class StringUTF16 { // Check the size of a UTF16-coded string // Throw an exception if out of range - static int newBytesLength(int len) { - if (len < 0) { - throw new NegativeArraySizeException(); - } - if (len >= MAX_LENGTH) { - throw new OutOfMemoryError("UTF16 String size is " + len + - ", should be less than " + MAX_LENGTH); - } + private static int newBytesLength(int len) { + checkBytesLength(len); return len << 1; } + /** + * Checks if the provided length is a valid UTF-16 string byte array length. 
+ * + * @param length a UTF-16 string byte array length + * + * @throws NegativeArraySizeException if {@code length < 0} + * @throws OutOfMemoryError if {@code length > (Integer.MAX_VALUE / 2)} + */ + private static void checkBytesLength(int length) { + if (length < 0) { + throw new NegativeArraySizeException(); + } + if (length >= MAX_LENGTH) { + throw new OutOfMemoryError("UTF16 String size is " + length + + ", should be less than " + MAX_LENGTH); + } + } + + /** + * Writes the given code point to the specified position of the provided + * UTF-16 string byte array. + *

+ * WARNING: This method does not perform any input validations. + * + * @param val a UTF-16 string byte array + * @param index the index of the character to write the code point to + * @param c a code point + */ + // vmIntrinsics::_putCharStringU @IntrinsicCandidate - // intrinsic performs no bounds checks static void putChar(byte[] val, int index, int c) { - assert index >= 0 && index < length(val) : "Trusted caller missed bounds check"; + assert val != null && index >= 0 && index < length(val) : "Trusted caller violated input constraints"; index <<= 1; val[index++] = (byte)(c >> HI_BYTE_SHIFT); val[index] = (byte)(c >> LO_BYTE_SHIFT); } + /** + * {@return the code point at the the specified position of the provided + * UTF-16 string byte array} + *

+ * WARNING: This method does not perform any input validations. + * + * @param val a UTF-16 string byte array + * @param index the index of the character to get the code point from + */ + // vmIntrinsics::_getCharStringU @IntrinsicCandidate - // intrinsic performs no bounds checks static char getChar(byte[] val, int index) { - assert index >= 0 && index < length(val) : "Trusted caller missed bounds check"; + assert val != null && index >= 0 && index < length(val) : "Trusted caller violated input constraints"; index <<= 1; return (char)(((val[index++] & 0xff) << HI_BYTE_SHIFT) | ((val[index] & 0xff) << LO_BYTE_SHIFT)); @@ -173,14 +204,27 @@ final class StringUTF16 { } /** - * {@return an encoded byte[] for the UTF16 characters in char[]} - * No checking is done on the characters, some may or may not be latin1. - * @param value a char array - * @param off an offset - * @param len a length + * {@return a UTF-16 string byte array produced by encoding the characters + * in the provided character array sub-range} + * + * @param value a character array to encode + * @param off the index of the character to start encoding from + * @param len the number of characters to encode + * + * @throws NegativeArraySizeException if {@code len < 0} + * @throws NullPointerException if {@code value} is null + * @throws OutOfMemoryError if {@code len > (Integer.MAX_VALUE / 2)} + * @throws StringIndexOutOfBoundsException if the sub-range is out of bounds */ - @IntrinsicCandidate static byte[] toBytes(char[] value, int off, int len) { + checkBytesLength(len); + String.checkBoundsOffCount(off, len, value.length); // Implicit null check on `value` + return toBytes0(value, off, len); + } + + // vmIntrinsics::_toBytesStringU + @IntrinsicCandidate + private static byte[] toBytes0(char[] value, int off, int len) { byte[] val = newBytesFor(len); for (int i = 0; i < len; i++) { putChar(val, i, value[off]); @@ -495,12 +539,28 @@ final class StringUTF16 { return result; } - @IntrinsicCandidate + 
/** + * Copies the specified sub-range of characters from a UTF-16 string byte + * array to the specified character array sub-range. + * + * @param value the source UTF-16 string byte array to copy from + * @param srcBegin the index (inclusive) of the first character in the source sub-range + * @param srcEnd the index (exclusive) of the last character in the source sub-range + * @param dst the target character array to copy to + * @param dstBegin the index (inclusive) of the first character in the target sub-range + * + * @throws NullPointerException if {@code value} or {@code dst} is null + * @throws StringIndexOutOfBoundsException if the sub-ranges are out of bounds + */ static void getChars(byte[] value, int srcBegin, int srcEnd, char[] dst, int dstBegin) { - // We need a range check here because 'getChar' has no checks - if (srcBegin < srcEnd) { - String.checkBoundsOffCount(srcBegin, srcEnd - srcBegin, length(value)); - } + checkBoundsBeginEnd(srcBegin, srcEnd, value); // Implicit null check on `value` via `checkBoundsBeginEnd()` + String.checkBoundsOffCount(dstBegin, srcEnd - srcBegin, dst.length); // Implicit null check on `dst` + getChars0(value, srcBegin, srcEnd, dst, dstBegin); + } + + // vmIntrinsics::_getCharsStringU + @IntrinsicCandidate + private static void getChars0(byte[] value, int srcBegin, int srcEnd, char[] dst, int dstBegin) { for (int i = srcBegin; i < srcEnd; i++) { dst[dstBegin++] = getChar(value, i); } @@ -721,7 +781,7 @@ final class StringUTF16 { return -StringLatin1.compareToCI_UTF16(other, value); } - public static int compareToFC_Latin1(byte[] value, byte[] other) { + static int compareToFC_Latin1(byte[] value, byte[] other) { return -StringLatin1.compareToFC_UTF16(other, value); } @@ -769,7 +829,7 @@ final class StringUTF16 { return 0; } - public static int compareToFC(byte[] value, byte[] other) { + static int compareToFC(byte[] value, byte[] other) { int tlast = length(value); int olast = length(other); int lim = Math.min(tlast, 
olast); @@ -1970,13 +2030,13 @@ final class StringUTF16 { } } - static final int MAX_LENGTH = Integer.MAX_VALUE >> 1; + private static final int MAX_LENGTH = Integer.MAX_VALUE >> 1; - static void checkIndex(int off, byte[] val) { + private static void checkIndex(int off, byte[] val) { String.checkIndex(off, length(val)); } - static void checkOffset(int off, byte[] val) { + private static void checkOffset(int off, byte[] val) { String.checkOffset(off, length(val)); } diff --git a/src/java.base/share/classes/java/lang/classfile/package-info.java b/src/java.base/share/classes/java/lang/classfile/package-info.java index 460f6699e7b..8bf5559df0a 100644 --- a/src/java.base/share/classes/java/lang/classfile/package-info.java +++ b/src/java.base/share/classes/java/lang/classfile/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -493,9 +493,9 @@ * * {@snippet lang="text" : * ClassElement = - * FieldModel*(UtfEntry name, Utf8Entry descriptor) - * | MethodModel*(UtfEntry name, Utf8Entry descriptor) - * | ModuleAttribute?(int flags, ModuleEntry moduleName, UtfEntry moduleVersion, + * FieldModel*(Utf8Entry name, Utf8Entry descriptor) + * | MethodModel*(Utf8Entry name, Utf8Entry descriptor) + * | ModuleAttribute?(int flags, ModuleEntry moduleName, Utf8Entry moduleVersion, * List requires, List opens, * List exports, List provides, * List uses) @@ -588,7 +588,7 @@ * | LabelTarget(Label label) * | LineNumber(int line) * | ExceptionCatch(Label tryStart, Label tryEnd, Label handler, ClassEntry exception) - * | LocalVariable(int slot, UtfEntry name, Utf8Entry type, Label startScope, Label endScope) + * | LocalVariable(int slot, Utf8Entry name, Utf8Entry type, Label startScope, Label endScope) * | LocalVariableType(int slot, Utf8Entry name, Utf8Entry type, Label startScope, Label endScope) * | CharacterRange(int rangeStart, int rangeEnd, int flags, Label startScope, Label endScope) * } diff --git a/src/java.base/share/classes/java/lang/doc-files/ValueBased.html b/src/java.base/share/classes/java/lang/doc-files/ValueBased.html index 6a935afe04b..3b860ce0534 100644 --- a/src/java.base/share/classes/java/lang/doc-files/ValueBased.html +++ b/src/java.base/share/classes/java/lang/doc-files/ValueBased.html @@ -30,35 +30,35 @@

{@index "Value-based Classes"}

-Some classes, such as java.lang.Integer and -java.time.LocalDate, are value-based. +Some classes, such as {@code java.lang.Integer} and +{@code java.time.LocalDate}, are value-based. A value-based class has the following properties:
  • the class declares only final instance fields (though these may contain references to mutable objects);
  • -
  • the class's implementations of equals, hashCode, - and toString compute their results solely from the values +
  • the class's implementations of {@code equals}, {@code hashCode}, + and {@code toString} compute their results solely from the values of the class's instance fields (and the members of the objects they reference), not from the instance's identity;
  • the class's methods treat instances as freely substitutable - when equal, meaning that interchanging any two instances x and - y that are equal according to equals() produces no + when equal, meaning that interchanging any two instances {@code x} and + {@code y} that are equal according to {@code equals()} produces no visible change in the behavior of the class's methods;
  • the class performs no synchronization using an instance's monitor;
  • -
  • the class does not declare (or has deprecated any) accessible constructors;
  • +
  • the class does not declare (or discourages use of) accessible constructors;
  • the class does not provide any instance creation mechanism that promises a unique identity on each method call—in particular, any factory method's contract must allow for the possibility that if two independently-produced - instances are equal according to equals(), they may also be - equal according to ==;
  • -
  • the class is final, and extends either Object or a hierarchy of + instances are equal according to {@code equals()}, they may also be + equal according to {@code ==};
  • +
  • the class is final, and extends either {@code Object} or a hierarchy of abstract classes that declare no instance fields or instance initializers and whose constructors are empty.

When two instances of a value-based class are equal (according to `equals`), a program should not attempt to distinguish between their identities, whether directly via reference - equality or indirectly via an appeal to synchronization, identity hashing, + equality {@code ==} or indirectly via an appeal to synchronization, identity hashing, serialization, or any other identity-sensitive mechanism.

Synchronization on instances of value-based classes is strongly discouraged, diff --git a/src/java.base/share/classes/java/lang/foreign/Linker.java b/src/java.base/share/classes/java/lang/foreign/Linker.java index cfa03090299..f597e4ee52e 100644 --- a/src/java.base/share/classes/java/lang/foreign/Linker.java +++ b/src/java.base/share/classes/java/lang/foreign/Linker.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -813,25 +813,28 @@ public sealed interface Linker permits AbstractLinker { } /** - * {@return a linker option used to save portions of the execution state - * immediately after calling a foreign function associated with a - * downcall method handle, before it can be overwritten by the Java - * runtime, or read through conventional means} + * {@return a linker option used to initialize portions of the execution + * state immediately before, and save portions of the execution + * state immediately after calling a foreign function associated + * with a downcall method handle, before it can be overwritten by the + * Java runtime, or read through conventional means} *

- * Execution state is captured by a downcall method handle on invocation, by - * writing it to a native segment provided by the user to the downcall method - * handle. For this purpose, a downcall method handle linked with this option - * will feature an additional {@link MemorySegment} parameter directly following - * the target address, and optional {@link SegmentAllocator} parameters. This - * parameter, the capture state segment, represents the native segment - * into which the captured state is written. + * Execution state is initialized from, or saved to a native segment provided by + * the user to the downcall method handle. For this purpose, a downcall method + * handle linked with this option will feature an additional {@link MemorySegment} + * parameter directly following the target address, and optional {@link SegmentAllocator} + * parameters. This parameter, the capture state segment, represents the + * native segment from which the capture state is initialized, and into which the + * capture state is saved. *

* The capture state segment must have size and alignment compatible with the * layout returned by {@linkplain #captureStateLayout}. This layout is a struct * layout which has a named field for each captured value. *

- * Captured state can be retrieved from the capture state segment by constructing - * var handles from the {@linkplain #captureStateLayout capture state layout}. + * Captured state can be stored in, or retrieved from the capture state segment by + * constructing var handles from the {@linkplain #captureStateLayout capture state layout}. + * Some functions require this state to be initialized to a particular value before + * invoking the downcall. *

* The following example demonstrates the use of this linker option: * {@snippet lang = "java": @@ -843,6 +846,7 @@ public sealed interface Linker permits AbstractLinker { * VarHandle errnoHandle = capturedStateLayout.varHandle(PathElement.groupElement("errno")); * try (Arena arena = Arena.ofConfined()) { * MemorySegment capturedState = arena.allocate(capturedStateLayout); + * errnoHandle.set(capturedState, 0L, 0); // set errno to 0 * handle.invoke(capturedState); * int errno = (int) errnoHandle.get(capturedState, 0L); * // use errno diff --git a/src/java.base/share/classes/java/lang/foreign/package-info.java b/src/java.base/share/classes/java/lang/foreign/package-info.java index 438d42ae7d1..2070f0c70a8 100644 --- a/src/java.base/share/classes/java/lang/foreign/package-info.java +++ b/src/java.base/share/classes/java/lang/foreign/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,7 +114,7 @@ * and we use it to {@linkplain java.lang.foreign.SymbolLookup#findOrThrow(java.lang.String) look up} * the {@code strlen} function in the standard C library; a downcall method handle * targeting said function is subsequently - * {@linkplain java.lang.foreign.Linker#downcallHandle(FunctionDescriptor, Linker.Option...) obtained}. + * {@linkplain java.lang.foreign.Linker#downcallHandle(MemorySegment, FunctionDescriptor, Linker.Option...) obtained}. * To complete the linking successfully, we must provide a * {@link java.lang.foreign.FunctionDescriptor} instance, describing the signature of the * {@code strlen} function. 
From this information, the linker will uniquely determine diff --git a/src/java.base/share/classes/java/lang/invoke/MemberName.java b/src/java.base/share/classes/java/lang/invoke/MemberName.java index 918d1b10791..d320ad11ade 100644 --- a/src/java.base/share/classes/java/lang/invoke/MemberName.java +++ b/src/java.base/share/classes/java/lang/invoke/MemberName.java @@ -408,11 +408,12 @@ final class MemberName implements Member, Cloneable { // let the rest (native, volatile, transient, etc.) be tested via Modifier.isFoo // unofficial modifier flags, used by HotSpot: - static final int BRIDGE = 0x00000040; - static final int VARARGS = 0x00000080; - static final int SYNTHETIC = 0x00001000; - static final int ANNOTATION= 0x00002000; - static final int ENUM = 0x00004000; + static final int BRIDGE = 0x00000040; + static final int VARARGS = 0x00000080; + static final int SYNTHETIC = 0x00001000; + static final int ANNOTATION = 0x00002000; + static final int ENUM = 0x00004000; + /** Utility method to query the modifier flags of this member; returns false if the member is not a method. 
*/ public boolean isBridge() { return allFlagsSet(IS_METHOD | BRIDGE); @@ -426,19 +427,19 @@ final class MemberName implements Member, Cloneable { return allFlagsSet(SYNTHETIC); } - static final String CONSTRUCTOR_NAME = ""; // the ever-popular + static final String CONSTRUCTOR_NAME = ""; // modifiers exported by the JVM: static final int RECOGNIZED_MODIFIERS = 0xFFFF; // private flags, not part of RECOGNIZED_MODIFIERS: static final int - IS_METHOD = MN_IS_METHOD, // method (not constructor) - IS_CONSTRUCTOR = MN_IS_CONSTRUCTOR, // constructor - IS_FIELD = MN_IS_FIELD, // field - IS_TYPE = MN_IS_TYPE, // nested type - CALLER_SENSITIVE = MN_CALLER_SENSITIVE, // @CallerSensitive annotation detected - TRUSTED_FINAL = MN_TRUSTED_FINAL; // trusted final field + IS_METHOD = MN_IS_METHOD, // method (not constructor) + IS_CONSTRUCTOR = MN_IS_CONSTRUCTOR, // constructor + IS_FIELD = MN_IS_FIELD, // field + IS_TYPE = MN_IS_TYPE, // nested type + CALLER_SENSITIVE = MN_CALLER_SENSITIVE, // @CallerSensitive annotation detected + TRUSTED_FINAL = MN_TRUSTED_FINAL; // trusted final field static final int ALL_ACCESS = Modifier.PUBLIC | Modifier.PRIVATE | Modifier.PROTECTED; static final int ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE; diff --git a/src/java.base/share/classes/java/lang/invoke/MethodHandleProxies.java b/src/java.base/share/classes/java/lang/invoke/MethodHandleProxies.java index 16f5c7e59b8..8dac21c8968 100644 --- a/src/java.base/share/classes/java/lang/invoke/MethodHandleProxies.java +++ b/src/java.base/share/classes/java/lang/invoke/MethodHandleProxies.java @@ -344,7 +344,7 @@ public final class MethodHandleProxies { ClassLoaders.platformClassLoader() : loader))) .build(proxyDesc, clb -> { clb.withSuperclass(CD_Object) - .withFlags(ACC_FINAL | ACC_SYNTHETIC) + .withFlags(ACC_SUPER | ACC_FINAL | ACC_SYNTHETIC) .withInterfaceSymbols(ifaceDesc) // static and instance fields .withField(TYPE_NAME, CD_Class, ACC_PRIVATE | ACC_STATIC | ACC_FINAL) diff --git 
a/src/java.base/share/classes/java/lang/ref/Reference.java b/src/java.base/share/classes/java/lang/ref/Reference.java index 88bdb99dfd6..df46ffe6ca6 100644 --- a/src/java.base/share/classes/java/lang/ref/Reference.java +++ b/src/java.base/share/classes/java/lang/ref/Reference.java @@ -644,12 +644,9 @@ public abstract sealed class Reference<@jdk.internal.RequiresIdentity T> * {@code null}, this method has no effect. * @since 9 */ - @ForceInline + @IntrinsicCandidate public static void reachabilityFence(Object ref) { - // Does nothing. This method is annotated with @ForceInline to eliminate - // most of the overhead that using @DontInline would cause with the - // HotSpot JVM, when this fence is used in a wide variety of situations. - // HotSpot JVM retains the ref and does not GC it before a call to - // this method, because the JIT-compilers do not have GC-only safepoints. + // Does nothing. HotSpot JVM retains the ref and does not GC it before a call to this method. + // Using an intrinsic allows JIT-compilers to further optimize it while retaining the correct semantics. } } diff --git a/src/java.base/share/classes/java/lang/reflect/Proxy.java b/src/java.base/share/classes/java/lang/reflect/Proxy.java index b811deb863d..6ce8e80cdca 100644 --- a/src/java.base/share/classes/java/lang/reflect/Proxy.java +++ b/src/java.base/share/classes/java/lang/reflect/Proxy.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ package java.lang.reflect; +import java.lang.classfile.ClassFile; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; @@ -467,7 +468,7 @@ public class Proxy implements java.io.Serializable { * Generate the specified proxy class. */ byte[] proxyClassFile = ProxyGenerator.generateProxyClass(loader, proxyName, interfaces, - context.accessFlags() | Modifier.FINAL); + context.accessFlags() | Modifier.FINAL | ClassFile.ACC_SUPER); try { Class pc = JLA.defineClass(loader, proxyName, proxyClassFile, null, "__dynamic_proxy__"); diff --git a/src/java.base/share/classes/java/math/BigDecimal.java b/src/java.base/share/classes/java/math/BigDecimal.java index 14d81d30c3d..6e651b4fde2 100644 --- a/src/java.base/share/classes/java/math/BigDecimal.java +++ b/src/java.base/share/classes/java/math/BigDecimal.java @@ -1026,12 +1026,11 @@ public class BigDecimal extends Number implements Comparable { return; } // Normalize - while ((significand & 1) == 0) { // i.e., significand is even - significand >>= 1; - exponent++; - } - int scl = 0; + int nTrailingZeros = Long.numberOfTrailingZeros(significand); + significand >>= nTrailingZeros; + exponent += nTrailingZeros; // Calculate intVal and scale + int scl = 0; BigInteger rb; long compactVal = sign * significand; if (exponent == 0) { diff --git a/src/java.base/share/classes/java/net/URLStreamHandler.java b/src/java.base/share/classes/java/net/URLStreamHandler.java index f66902a451e..76807d27cee 100644 --- a/src/java.base/share/classes/java/net/URLStreamHandler.java +++ b/src/java.base/share/classes/java/net/URLStreamHandler.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -481,14 +481,33 @@ public abstract class URLStreamHandler { * @return a string representation of the {@code URL} argument. */ protected String toExternalForm(URL u) { - String s; + // The fast paths and branch-free concatenations in this method are here for + // a reason and should not be updated without checking performance figures. + + // Optionality, subtly different for authority + boolean emptyAuth = u.getAuthority() == null || u.getAuthority().isEmpty(); + boolean emptyPath = u.getPath() == null; + boolean emptyQuery = u.getQuery() == null; + boolean emptyRef = u.getRef() == null; + var path = emptyPath ? "" : u.getPath(); + // Fast paths for empty components + if (emptyQuery && emptyRef) { + return emptyAuth + ? (u.getProtocol() + ":" + path) + : (u.getProtocol() + "://" + u.getAuthority() + path); + } + // Prefer locals for efficient concatenation + var authSep = emptyAuth ? ":" : "://"; + var auth = emptyAuth ? "" : u.getAuthority(); + var querySep = emptyQuery ? "" : "?"; + var query = emptyQuery ? "" : u.getQuery(); + var refSep = emptyRef ? "" : "#"; + var ref = emptyRef ? "" : u.getRef(); return u.getProtocol() - + ':' - + ((s = u.getAuthority()) != null && !s.isEmpty() - ? "//" + s : "") - + ((s = u.getPath()) != null ? s : "") - + ((s = u.getQuery()) != null ? '?' + s : "") - + ((s = u.getRef()) != null ? 
'#' + s : ""); + + authSep + auth + + path + + querySep + query + + refSep + ref; } /** diff --git a/src/java.base/share/classes/java/security/Security.java b/src/java.base/share/classes/java/security/Security.java index 30a22b05742..9faa172c8e7 100644 --- a/src/java.base/share/classes/java/security/Security.java +++ b/src/java.base/share/classes/java/security/Security.java @@ -330,6 +330,10 @@ public final class Security { public Properties getInitialProperties() { return initialSecurityProperties; } + @Override + public Properties getCurrentProperties() { + return props; + } }); } diff --git a/src/java.base/share/classes/java/text/SimpleDateFormat.java b/src/java.base/share/classes/java/text/SimpleDateFormat.java index ba73e5b5a86..4c57214dbba 100644 --- a/src/java.base/share/classes/java/text/SimpleDateFormat.java +++ b/src/java.base/share/classes/java/text/SimpleDateFormat.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ package java.text; import java.io.IOException; import java.io.InvalidObjectException; import java.io.ObjectInputStream; -import static java.text.DateFormatSymbols.*; +import java.time.ZoneOffset; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; @@ -57,6 +57,8 @@ import sun.util.calendar.ZoneInfoFile; import sun.util.locale.provider.LocaleProviderAdapter; import sun.util.locale.provider.TimeZoneNameUtility; +import static java.text.DateFormatSymbols.*; + /** * {@code SimpleDateFormat} is a concrete class for formatting and * parsing dates in a locale-sensitive manner. 
It allows for formatting @@ -1293,15 +1295,22 @@ public class SimpleDateFormat extends DateFormat { case PATTERN_ZONE_NAME: // 'z' if (current == null) { + TimeZone tz = calendar.getTimeZone(); + String tzid = tz.getID(); + int zoneOffset = calendar.get(Calendar.ZONE_OFFSET); + int dstOffset = calendar.get(Calendar.DST_OFFSET) + zoneOffset; + + // Check if an explicit metazone DST offset exists + String explicitDstOffset = TimeZoneNameUtility.explicitDstOffset(tzid); + boolean daylight = explicitDstOffset != null ? + dstOffset == ZoneOffset.of(explicitDstOffset).getTotalSeconds() * 1_000 : + dstOffset != zoneOffset; if (formatData.locale == null || formatData.isZoneStringsSet) { - int zoneIndex = - formatData.getZoneIndex(calendar.getTimeZone().getID()); + int zoneIndex = formatData.getZoneIndex(tzid); if (zoneIndex == -1) { - value = calendar.get(Calendar.ZONE_OFFSET) + - calendar.get(Calendar.DST_OFFSET); - buffer.append(ZoneInfoFile.toCustomID(value)); + buffer.append(ZoneInfoFile.toCustomID(dstOffset)); } else { - int index = (calendar.get(Calendar.DST_OFFSET) == 0) ? 1: 3; + int index = daylight ? 3 : 1; if (count < 4) { // Use the short name index++; @@ -1310,8 +1319,6 @@ public class SimpleDateFormat extends DateFormat { buffer.append(zoneStrings[zoneIndex][index]); } } else { - TimeZone tz = calendar.getTimeZone(); - boolean daylight = (calendar.get(Calendar.DST_OFFSET) != 0); int tzstyle = (count < 4 ? TimeZone.SHORT : TimeZone.LONG); buffer.append(tz.getDisplayName(daylight, tzstyle, formatData.locale)); } diff --git a/src/java.base/share/classes/java/time/ZoneOffset.java b/src/java.base/share/classes/java/time/ZoneOffset.java index 2a45e7cbf82..3bcb75db3e4 100644 --- a/src/java.base/share/classes/java/time/ZoneOffset.java +++ b/src/java.base/share/classes/java/time/ZoneOffset.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. 
All rights reserved. * Copyright (c) 2025, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -87,9 +87,9 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.function.Supplier; import jdk.internal.util.DecimalDigits; -import jdk.internal.vm.annotation.Stable; /** * A time-zone offset from Greenwich/UTC, such as {@code +02:00}. @@ -178,8 +178,13 @@ public final class ZoneOffset /** * The zone rules for an offset will always return this offset. Cache it for efficiency. */ - @Stable - private transient ZoneRules rules; + private final transient LazyConstant rules = + LazyConstant.of(new Supplier() { + @Override + public ZoneRules get() { + return ZoneRules.of(ZoneOffset.this); + } + }); //----------------------------------------------------------------------- /** @@ -521,11 +526,7 @@ public final class ZoneOffset */ @Override public ZoneRules getRules() { - ZoneRules rules = this.rules; - if (rules == null) { - rules = this.rules = ZoneRules.of(this); - } - return rules; + return rules.get(); } @Override diff --git a/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java b/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java index 4708094effb..4594dc6f1dc 100644 --- a/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java +++ b/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2025, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -4513,7 +4513,11 @@ public final class DateTimeFormatterBuilder { TemporalAccessor dt = context.getTemporal(); int type = GENERIC; if (!isGeneric) { - if (dt.isSupported(ChronoField.INSTANT_SECONDS)) { + // Check if an explicit metazone DST offset exists + String dstOffset = TimeZoneNameUtility.explicitDstOffset(zname); + if (dt.isSupported(OFFSET_SECONDS) && dstOffset != null) { + type = ZoneOffset.from(dt).equals(ZoneOffset.of(dstOffset)) ? DST : STD; + } else if (dt.isSupported(ChronoField.INSTANT_SECONDS)) { type = zone.getRules().isDaylightSavings(Instant.from(dt)) ? DST : STD; } else if (dt.isSupported(ChronoField.EPOCH_DAY) && dt.isSupported(ChronoField.NANO_OF_DAY)) { diff --git a/src/java.base/share/classes/java/util/Base64.java b/src/java.base/share/classes/java/util/Base64.java index ed1a4a8d638..fd714050149 100644 --- a/src/java.base/share/classes/java/util/Base64.java +++ b/src/java.base/share/classes/java/util/Base64.java @@ -32,6 +32,8 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import sun.nio.cs.ISO_8859_1; +import jdk.internal.access.JavaLangAccess; +import jdk.internal.access.SharedSecrets; import jdk.internal.util.Preconditions; import jdk.internal.vm.annotation.IntrinsicCandidate; @@ -201,6 +203,7 @@ public final class Base64 { * @since 1.8 */ public static class Encoder { + private static final JavaLangAccess JLA = SharedSecrets.getJavaLangAccess(); private final byte[] newline; private final int linemax; @@ -344,10 +347,9 @@ public final class Base64 { * the byte array to encode * @return A String containing the resulting Base64 encoded characters */ - @SuppressWarnings("deprecation") public String encodeToString(byte[] src) { byte[] encoded = encode(src); - return new String(encoded, 0, 0, encoded.length); + return JLA.uncheckedNewStringWithLatin1Bytes(encoded); } /** diff --git a/src/java.base/share/classes/java/util/HexFormat.java b/src/java.base/share/classes/java/util/HexFormat.java index aebb8b9af52..7d9fe08108d 
100644 --- a/src/java.base/share/classes/java/util/HexFormat.java +++ b/src/java.base/share/classes/java/util/HexFormat.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,6 +26,7 @@ package java.util; +import jdk.internal.ValueBased; import jdk.internal.access.JavaLangAccess; import jdk.internal.access.SharedSecrets; import jdk.internal.util.HexDigits; @@ -134,7 +135,7 @@ import java.nio.CharBuffer; * @since 17 */ - +@ValueBased public final class HexFormat { // Access to create strings from a byte array. diff --git a/src/java.base/share/classes/java/util/Locale.java b/src/java.base/share/classes/java/util/Locale.java index f45a52c14fa..6b071cd15b2 100644 --- a/src/java.base/share/classes/java/util/Locale.java +++ b/src/java.base/share/classes/java/util/Locale.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -176,7 +176,10 @@ import sun.util.locale.provider.TimeZoneNameUtility; * SUBTAG (('_'|'-') SUBTAG)*} where {@code SUBTAG = * [0-9][0-9a-zA-Z]{3} | [0-9a-zA-Z]{5,8}}. *

BCP 47 deviation: BCP 47 only - * uses hyphen ('-') as a delimiter, {@code Locale} is more lenient.
+ * uses hyphen ('-') as a delimiter and APIs provided by {@code Locale} which accept + * BCP 47 language tags expect as such. However, for backwards compatibility, + * {@link Locale.Builder#setVariant(String)} also accepts underscore ('_'). + * {@link Locale#of(String, String, String)} accepts only underscore ('_'). * *
Example: "polyton" (Polytonic Greek), "POSIX"
* @@ -1929,7 +1932,7 @@ public final class Locale implements Cloneable, Serializable { } /** - * Returns a name for the locale's language that is appropriate for display to the + * Returns a name for {@code this} locale's language that is appropriate for display to the * user. * If possible, the name returned will be localized for the default * {@link Locale.Category#DISPLAY DISPLAY} locale. @@ -1943,14 +1946,15 @@ public final class Locale implements Cloneable, Serializable { * this function falls back on the English name, and uses the ISO code as a last-resort * value. If the locale doesn't specify a language, this function returns the empty string. * - * @return The name of the display language. + * @return The name of the display language appropriate to the default + * {@link Locale.Category#DISPLAY DISPLAY} locale. */ - public final String getDisplayLanguage() { + public String getDisplayLanguage() { return getDisplayLanguage(getDefault(Category.DISPLAY)); } /** - * Returns a name for the locale's language that is appropriate for display to the + * Returns a name for {@code this} locale's language that is appropriate for display to the * user. * If possible, the name returned will be localized according to inLocale. * For example, if the locale is fr_FR and inLocale @@ -1961,7 +1965,7 @@ public final class Locale implements Cloneable, Serializable { * on the ISO code as a last-resort value. If the locale doesn't specify a language, * this function returns the empty string. * - * @param inLocale The locale for which to retrieve the display language. + * @param inLocale The locale in which to localize the display language. * @return The name of the display language appropriate to the given locale. 
* @throws NullPointerException if {@code inLocale} is {@code null} */ @@ -1970,13 +1974,13 @@ public final class Locale implements Cloneable, Serializable { } /** - * Returns a name for the locale's script that is appropriate for display to + * Returns a name for {@code this} locale's script that is appropriate for display to * the user. If possible, the name will be localized for the default * {@link Locale.Category#DISPLAY DISPLAY} locale. Returns * the empty string if this locale doesn't specify a script code. * - * @return the display name of the script code for the current default - * {@link Locale.Category#DISPLAY DISPLAY} locale + * @return The display name of the script code appropriate to the default + * {@link Locale.Category#DISPLAY DISPLAY} locale. * @since 1.7 */ public String getDisplayScript() { @@ -1984,14 +1988,13 @@ public final class Locale implements Cloneable, Serializable { } /** - * Returns a name for the locale's script that is appropriate + * Returns a name for {@code this} locale's script that is appropriate * for display to the user. If possible, the name will be * localized for the given locale. Returns the empty string if * this locale doesn't specify a script code. * - * @param inLocale The locale for which to retrieve the display script. - * @return the display name of the script code for the current default - * {@link Locale.Category#DISPLAY DISPLAY} locale + * @param inLocale The locale in which to localize the display script. + * @return The display name of the script code appropriate to the given locale. * @throws NullPointerException if {@code inLocale} is {@code null} * @since 1.7 */ @@ -2000,7 +2003,7 @@ public final class Locale implements Cloneable, Serializable { } /** - * Returns a name for the locale's country that is appropriate for display to the + * Returns a name for {@code this} locale's country that is appropriate for display to the * user. 
* If possible, the name returned will be localized for the default * {@link Locale.Category#DISPLAY DISPLAY} locale. @@ -2014,14 +2017,15 @@ public final class Locale implements Cloneable, Serializable { * this function falls back on the English name, and uses the ISO code as a last-resort * value. If the locale doesn't specify a country, this function returns the empty string. * - * @return The name of the country appropriate to the locale. + * @return The name of the country appropriate to the default + * {@link Locale.Category#DISPLAY DISPLAY} locale. */ - public final String getDisplayCountry() { + public String getDisplayCountry() { return getDisplayCountry(getDefault(Category.DISPLAY)); } /** - * Returns a name for the locale's country that is appropriate for display to the + * Returns a name for {@code this} locale's country that is appropriate for display to the * user. * If possible, the name returned will be localized according to inLocale. * For example, if the locale is fr_FR and inLocale @@ -2032,7 +2036,7 @@ public final class Locale implements Cloneable, Serializable { * on the ISO code as a last-resort value. If the locale doesn't specify a country, * this function returns the empty string. * - * @param inLocale The locale for which to retrieve the display country. + * @param inLocale The locale in which to localize the display country. * @return The name of the country appropriate to the given locale. * @throws NullPointerException if {@code inLocale} is {@code null} */ @@ -2058,23 +2062,24 @@ public final class Locale implements Cloneable, Serializable { } /** - * Returns a name for the locale's variant code that is appropriate for display to the + * Returns a name for {@code this} locale's variant code that is appropriate for display to the * user. If possible, the name will be localized for the default * {@link Locale.Category#DISPLAY DISPLAY} locale. If the locale * doesn't specify a variant code, this function returns the empty string. 
* - * @return The name of the display variant code appropriate to the locale. + * @return The name of the display variant code appropriate to the default + * {@link Locale.Category#DISPLAY DISPLAY} locale. */ - public final String getDisplayVariant() { + public String getDisplayVariant() { return getDisplayVariant(getDefault(Category.DISPLAY)); } /** - * Returns a name for the locale's variant code that is appropriate for display to the + * Returns a name for {@code this} locale's variant code that is appropriate for display to the * user. If possible, the name will be localized for inLocale. If the locale * doesn't specify a variant code, this function returns the empty string. * - * @param inLocale The locale for which to retrieve the display variant code. + * @param inLocale The locale in which to localize the display variant code. * @return The name of the display variant code appropriate to the given locale. * @throws NullPointerException if {@code inLocale} is {@code null} */ @@ -2095,7 +2100,7 @@ public final class Locale implements Cloneable, Serializable { } /** - * Returns a name for the locale that is appropriate for display to the + * Returns a name for {@code this} locale that is appropriate for display to the * user. This will be the values returned by getDisplayLanguage(), * getDisplayScript(), getDisplayCountry(), getDisplayVariant() and * optional {@linkplain ##def_locale_extension Unicode extensions} @@ -2113,14 +2118,15 @@ public final class Locale implements Cloneable, Serializable { * be localized depending on the locale. If the language, script, country, * and variant fields are all empty, this function returns the empty string. * - * @return The name of the locale appropriate to display. + * @return The display name appropriate to the default + * {@link Locale.Category#DISPLAY DISPLAY} locale. 
*/ - public final String getDisplayName() { + public String getDisplayName() { return getDisplayName(getDefault(Category.DISPLAY)); } /** - * Returns a name for the locale that is appropriate for display + * Returns a name for {@code this} locale that is appropriate for display * to the user. This will be the values returned by * getDisplayLanguage(), getDisplayScript(), getDisplayCountry(), * getDisplayVariant(), and optional {@linkplain ##def_locale_extension @@ -2139,8 +2145,8 @@ public final class Locale implements Cloneable, Serializable { * be localized depending on the locale. If the language, script, country, * and variant fields are all empty, this function returns the empty string. * - * @param inLocale The locale for which to retrieve the display name. - * @return The name of the locale appropriate to display. + * @param inLocale The locale in which to localize the display name. + * @return The display name appropriate to the given locale. * @throws NullPointerException if {@code inLocale} is {@code null} */ public String getDisplayName(Locale inLocale) { diff --git a/src/java.base/share/classes/java/util/ResourceBundle.java b/src/java.base/share/classes/java/util/ResourceBundle.java index db19eda6399..f91db79891b 100644 --- a/src/java.base/share/classes/java/util/ResourceBundle.java +++ b/src/java.base/share/classes/java/util/ResourceBundle.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1119,15 +1119,15 @@ public abstract class ResourceBundle { * sequence of bundle names generated by truncating the last underscore and * the part following it is inserted after a candidate bundle name with the * original variant. 
For example, for a locale with language "en", script - * "Latn, country "US" and variant "WINDOWS_VISTA", and bundle base name + * "Latn", country "US" and variant "WINDOWS_WIN11", and bundle base name * "MyResource", the list of candidate bundle names below is generated: * *
-     * MyResource_en_Latn_US_WINDOWS_VISTA
+     * MyResource_en_Latn_US_WINDOWS_WIN11
      * MyResource_en_Latn_US_WINDOWS
      * MyResource_en_Latn_US
      * MyResource_en_Latn
-     * MyResource_en_US_WINDOWS_VISTA
+     * MyResource_en_US_WINDOWS_WIN11
      * MyResource_en_US_WINDOWS
      * MyResource_en_US
      * MyResource_en
diff --git a/src/java.base/share/classes/java/util/ServiceLoader.java b/src/java.base/share/classes/java/util/ServiceLoader.java
index 5137adc1c08..5e4fa4ed2ef 100644
--- a/src/java.base/share/classes/java/util/ServiceLoader.java
+++ b/src/java.base/share/classes/java/util/ServiceLoader.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -621,9 +621,9 @@ public final class ServiceLoader
         Constructor ctor = null;
         try {
             ctor = clazz.getConstructor();
-        } catch (NoSuchMethodException ex) {
+        } catch (NoSuchMethodException | LinkageError e) {
             String cn = clazz.getName();
-            fail(service, cn + " Unable to get public no-arg constructor", ex);
+            fail(service, cn + " Unable to get public no-arg constructor", e);
         }
         if (inExplicitModule(clazz))
             ctor.setAccessible(true);
@@ -1086,8 +1086,8 @@ public final class ServiceLoader
             String cn = pending.next();
             try {
                 return Class.forName(cn, false, loader);
-            } catch (ClassNotFoundException x) {
-                fail(service, "Provider " + cn + " not found");
+            } catch (ClassNotFoundException | LinkageError e) {
+                fail(service, "Provider " + cn + " not found", e);
                 return null;
             }
         }
diff --git a/src/java.base/share/classes/java/util/WeakHashMap.java b/src/java.base/share/classes/java/util/WeakHashMap.java
index b5a27593840..1412d8f6ff9 100644
--- a/src/java.base/share/classes/java/util/WeakHashMap.java
+++ b/src/java.base/share/classes/java/util/WeakHashMap.java
@@ -25,8 +25,8 @@
 
 package java.util;
 
-import java.lang.ref.WeakReference;
 import java.lang.ref.ReferenceQueue;
+import java.lang.ref.WeakReference;
 import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
diff --git a/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java b/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java
index f289186e0ad..e83a92e5e6f 100644
--- a/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java
+++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java
@@ -560,70 +560,89 @@ public class ForkJoinPool extends AbstractExecutorService
      * access (which is usually needed anyway).
      *
      * Signalling. Signals (in signalWork) cause new or reactivated
-     * workers to scan for tasks.  SignalWork is invoked in two cases:
-     * (1) When a task is pushed onto an empty queue, and (2) When a
-     * worker takes a top-level task from a queue that has additional
-     * tasks. Together, these suffice in O(log(#threads)) steps to
-     * fully activate with at least enough workers, and ideally no
-     * more than required.  This ideal is unobtainable: Callers do not
-     * know whether another worker will finish its current task and
-     * poll for others without need of a signal (which is otherwise an
-     * advantage of work-stealing vs other schemes), and also must
-     * conservatively estimate the triggering conditions of emptiness
-     * or non-emptiness; all of which usually cause more activations
-     * than necessary (see below). (Method signalWork is also used as
-     * failsafe in case of Thread failures in deregisterWorker, to
-     * activate or create a new worker to replace them).
+     * workers to scan for tasks.  Method signalWork and its callers
+     * try to approximate the unattainable goal of having the right
+     * number of workers activated for the tasks at hand, but must err
+     * on the side of too many workers vs too few to avoid stalls:
      *
-     * Top-Level scheduling
-     * ====================
+     *  * If computations are purely tree structured, it suffices for
+     *    every worker to activate another when it pushes a task into
+     *    an empty queue, resulting in O(log(#threads)) steps to full
+     *    activation. Emptiness must be conservatively approximated,
+     *    which may result in unnecessary signals.  Also, to reduce
+     *    resource usages in some cases, at the expense of slower
+     *    startup in others, activation of an idle thread is preferred
+     *    over creating a new one, here and elsewhere.
+     *
+     *  * At the other extreme, if "flat" tasks (those that do not in
+     *    turn generate others) come in serially from only a single
+     *    producer, each worker taking a task from a queue should
+     *    propagate a signal if there are more tasks in that
+     *    queue. This is equivalent to, but generally faster than,
+     *    arranging the stealer take multiple tasks, re-pushing one or
+     *    more on its own queue, and signalling (because its queue is
+     *    empty), also resulting in logarithmic full activation
+     *    time. If tasks do not not engage in unbounded loops based on
+     *    the actions of other workers with unknown dependencies loop,
+     *    this form of proagation can be limited to one signal per
+     *    activation (phase change). We distinguish the cases by
+     *    further signalling only if the task is an InterruptibleTask
+     *    (see below), which are the only supported forms of task that
+     *    may do so.
+     *
+     * * Because we don't know about usage patterns (or most commonly,
+     *    mixtures), we use both approaches, which present even more
+     *    opportunities to over-signal. (Failure to distinguish these
+     *    cases in terms of submission methods was arguably an early
+     *    design mistake.)  Note that in either of these contexts,
+     *    signals may be (and often are) unnecessary because active
+     *    workers continue scanning after running tasks without the
+     *    need to be signalled (which is one reason work stealing is
+     *    often faster than alternatives), so additional workers
+     *    aren't needed.
+     *
+     * * For rapidly branching tasks that require full pool resources,
+     *   oversignalling is OK, because signalWork will soon have no
+     *   more workers to create or reactivate. But for others (mainly
+     *   externally submitted tasks), overprovisioning may cause very
+     *   noticeable slowdowns due to contention and resource
+     *   wastage. We reduce impact by deactivating workers when
+     *   queues don't have accessible tasks, but reactivating and
+     *   rescanning if other tasks remain.
+     *
+     * * Despite these, signal contention and overhead effects still
+     *   occur during ramp-up and ramp-down of small computations.
      *
      * Scanning. Method runWorker performs top-level scanning for (and
      * execution of) tasks by polling a pseudo-random permutation of
      * the array (by starting at a given index, and using a constant
      * cyclically exhaustive stride.)  It uses the same basic polling
      * method as WorkQueue.poll(), but restarts with a different
-     * permutation on each rescan.  The pseudorandom generator need
-     * not have high-quality statistical properties in the long
+     * permutation on each invocation.  The pseudorandom generator
+     * need not have high-quality statistical properties in the long
      * term. We use Marsaglia XorShifts, seeded with the Weyl sequence
-     * from ThreadLocalRandom probes, which are cheap and suffice.
+     * from ThreadLocalRandom probes, which are cheap and
+     * suffice. Each queue's polling attempts to avoid becoming stuck
+     * when other scanners/pollers stall.  Scans do not otherwise
+     * explicitly take into account core affinities, loads, cache
+     * localities, etc, However, they do exploit temporal locality
+     * (which usually approximates these) by preferring to re-poll
+     * from the same queue after a successful poll before trying
+     * others, which also reduces bookkeeping, cache traffic, and
+     * scanning overhead. But it also reduces fairness, which is
+     * partially counteracted by giving up on detected interference
+     * (which also reduces contention when too many workers try to
+     * take small tasks from the same queue).
      *
      * Deactivation. When no tasks are found by a worker in runWorker,
-     * it invokes deactivate, that first deactivates (to an IDLE
-     * phase).  Avoiding missed signals during deactivation requires a
-     * (conservative) rescan, reactivating if there may be tasks to
-     * poll. Because idle workers are often not yet blocked (parked),
-     * we use a WorkQueue field to advertise that a waiter actually
-     * needs unparking upon signal.
-     *
-     * When tasks are constructed as (recursive) DAGs, top-level
-     * scanning is usually infrequent, and doesn't encounter most
-     * of the following problems addressed by runWorker and awaitWork:
-     *
-     * Locality. Polls are organized into "runs", continuing until
-     * empty or contended, while also minimizing interference by
-     * postponing bookeeping to ends of runs. This may reduce
-     * fairness.
-     *
-     * Contention. When many workers try to poll few queues, they
-     * often collide, generating CAS failures and disrupting locality
-     * of workers already running their tasks. This also leads to
-     * stalls when tasks cannot be taken because other workers have
-     * not finished poll operations, which is detected by reading
-     * ahead in queue arrays. In both cases, workers restart scans in a
-     * way that approximates randomized backoff.
-     *
-     * Oversignalling. When many short top-level tasks are present in
-     * a small number of queues, the above signalling strategy may
-     * activate many more workers than needed, worsening locality and
-     * contention problems, while also generating more global
-     * contention (field ctl is CASed on every activation and
-     * deactivation). We filter out (both in runWorker and
-     * signalWork) attempted signals that are surely not needed
-     * because the signalled tasks are already taken.
-     *
-     * Shutdown and Quiescence
-     * =======================
+     * it tries to deactivate()), giving up (and rescanning) on "ctl"
+     * contention. To avoid missed signals during deactivation, the
+     * method rescans and reactivates if there may have been a missed
+     * signal during deactivation. To reduce false-alarm reactivations
+     * while doing so, we scan multiple times (analogously to method
+     * quiescent()) before trying to reactivate.  Because idle workers
+     * are often not yet blocked (parked), we use a WorkQueue field to
+     * advertise that a waiter actually needs unparking upon signal.
      *
      * Quiescence. Workers scan looking for work, giving up when they
      * don't find any, without being sure that none are available.
@@ -873,7 +892,9 @@ public class ForkJoinPool extends AbstractExecutorService
      * shutdown, runners are interrupted so they can cancel. Since
      * external joining callers never run these tasks, they must await
      * cancellation by others, which can occur along several different
-     * paths.
+     * paths. The inability to rely on caller-runs may also require
+     * extra signalling (resulting in scanning and contention) so is
+     * done only conditionally in methods push and runworker.
      *
      * Across these APIs, rules for reporting exceptions for tasks
      * with results accessed via join() differ from those via get(),
@@ -940,13 +961,9 @@ public class ForkJoinPool extends AbstractExecutorService
      * less-contended applications. To help arrange this, some
      * non-reference fields are declared as "long" even when ints or
      * shorts would suffice.  For class WorkQueue, an
-     * embedded @Contended isolates the very busy top index, along
-     * with status and bookkeeping fields written (mostly) by owners,
-     * that otherwise interfere with reading array and base
-     * fields. There are other variables commonly contributing to
-     * false-sharing-related performance issues (including fields of
-     * class Thread), but we can't do much about this except try to
-     * minimize access.
+     * embedded @Contended region segregates fields most heavily
+     * updated by owners from those most commonly read by stealers or
+     * other management.
      *
      * Initial sizing and resizing of WorkQueue arrays is an even more
      * delicate tradeoff because the best strategy systematically
@@ -955,11 +972,13 @@ public class ForkJoinPool extends AbstractExecutorService
      * direct false-sharing and indirect cases due to GC bookkeeping
      * (cardmarks etc), and reduce the number of resizes, which are
      * not especially fast because they require atomic transfers.
-     * Currently, arrays are initialized to be just large enough to
-     * avoid resizing in most tree-structured tasks, but grow rapidly
-     * until large.  (Maintenance note: any changes in fields, queues,
-     * or their uses, or JVM layout policies, must be accompanied by
-     * re-evaluation of these placement and sizing decisions.)
+     * Currently, arrays for workers are initialized to be just large
+     * enough to avoid resizing in most tree-structured tasks, but
+     * larger for external queues where both false-sharing problems
+     * and the need for resizing are more common. (Maintenance note:
+     * any changes in fields, queues, or their uses, or JVM layout
+     * policies, must be accompanied by re-evaluation of these
+     * placement and sizing decisions.)
      *
      * Style notes
      * ===========
@@ -1042,11 +1061,17 @@ public class ForkJoinPool extends AbstractExecutorService
     static final int DEFAULT_COMMON_MAX_SPARES = 256;
 
     /**
-     * Initial capacity of work-stealing queue array.
+     * Initial capacity of work-stealing queue array for workers.
      * Must be a power of two, at least 2. See above.
      */
     static final int INITIAL_QUEUE_CAPACITY = 1 << 6;
 
+    /**
+     * Initial capacity of work-stealing queue array for external queues.
+     * Must be a power of two, at least 2. See above.
+     */
+    static final int INITIAL_EXTERNAL_QUEUE_CAPACITY = 1 << 9;
+
     // conversions among short, int, long
     static final int  SMASK           = 0xffff;      // (unsigned) short bits
     static final long LMASK           = 0xffffffffL; // lower 32 bits of long
@@ -1186,11 +1211,11 @@ public class ForkJoinPool extends AbstractExecutorService
         @jdk.internal.vm.annotation.Contended("w")
         int stackPred;             // pool stack (ctl) predecessor link
         @jdk.internal.vm.annotation.Contended("w")
-        volatile int parking;      // nonzero if parked in awaitWork
-        @jdk.internal.vm.annotation.Contended("w")
         volatile int source;       // source queue id (or DROPPED)
         @jdk.internal.vm.annotation.Contended("w")
         int nsteals;               // number of steals from other queues
+        @jdk.internal.vm.annotation.Contended("w")
+        volatile int parking;      // nonzero if parked in awaitWork
 
         // Support for atomic operations
         private static final Unsafe U;
@@ -1223,11 +1248,11 @@ public class ForkJoinPool extends AbstractExecutorService
          */
         WorkQueue(ForkJoinWorkerThread owner, int id, int cfg,
                   boolean clearThreadLocals) {
+            array = new ForkJoinTask[owner == null ?
+                                        INITIAL_EXTERNAL_QUEUE_CAPACITY :
+                                        INITIAL_QUEUE_CAPACITY];
+            this.owner = owner;
             this.config = (clearThreadLocals) ? cfg | CLEAR_TLS : cfg;
-            if ((this.owner = owner) == null) {
-                array = new ForkJoinTask[INITIAL_QUEUE_CAPACITY];
-                phase = id | IDLE;
-            }
         }
 
         /**
@@ -1254,27 +1279,27 @@ public class ForkJoinPool extends AbstractExecutorService
          * @throws RejectedExecutionException if array could not be resized
          */
         final void push(ForkJoinTask task, ForkJoinPool pool, boolean internal) {
-            int s = top, b = base, m, cap, room; ForkJoinTask[] a, na;
-            if ((a = array) != null && (cap = a.length) > 0) { // else disabled
-                int k = (m = cap - 1) & s;
-                if ((room = m - (s - b)) >= 0) {
+            int s = top, b = base, m, cap, room; ForkJoinTask[] a;
+            if ((a = array) != null && (cap = a.length) > 0 && // else disabled
+                task != null) {
+                int pk = task.noUserHelp() + 1;             // prev slot offset
+                if ((room = (m = cap - 1) - (s - b)) >= 0) {
                     top = s + 1;
-                    long pos = slotOffset(k);
+                    long pos = slotOffset(m & s);
                     if (!internal)
                         U.putReference(a, pos, task);       // inside lock
                     else
                         U.getAndSetReference(a, pos, task); // fully fenced
-                    if (room == 0 && (na = growArray(a, cap, s)) != null)
-                        k = ((a = na).length - 1) & s;      // resize
+                    if (room == 0)                          // resize
+                        growArray(a, cap, s);
                 }
                 if (!internal)
                     unlockPhase();
                 if (room < 0)
                     throw new RejectedExecutionException("Queue capacity exceeded");
-                if (pool != null &&
-                    (room == 0 ||
-                     U.getReferenceAcquire(a, slotOffset(m & (s - 1))) == null))
-                    pool.signalWork(a, k);    // may have appeared empty
+                if ((room == 0 || U.getReferenceAcquire(a, slotOffset(m & (s - pk))) == null) &&
+                    pool != null)
+                    pool.signalWork();   // may have appeared empty
             }
         }
 
@@ -1283,12 +1308,11 @@ public class ForkJoinPool extends AbstractExecutorService
          * @param a old array
          * @param cap old array capacity
          * @param s current top
-         * @return new array, or null on failure
          */
-        private ForkJoinTask[] growArray(ForkJoinTask[] a, int cap, int s) {
-            int newCap = (cap >= 1 << 16) ? cap << 1 : cap << 2;
-            ForkJoinTask[] newArray = null;
+        private void growArray(ForkJoinTask[] a, int cap, int s) {
+            int newCap = cap << 1;
             if (a != null && a.length == cap && cap > 0 && newCap > 0) {
+                ForkJoinTask[] newArray = null;
                 try {
                     newArray = new ForkJoinTask[newCap];
                 } catch (OutOfMemoryError ex) {
@@ -1305,45 +1329,34 @@ public class ForkJoinPool extends AbstractExecutorService
                     updateArray(newArray);           // fully fenced
                 }
             }
-            return newArray;
         }
 
         /**
-         * Takes next task, if one exists, in lifo order.
+         * Takes next task, if one exists, in order specified by mode,
+         * so acts as either local-pop or local-poll. Called only by owner.
+         * @param fifo nonzero if FIFO mode
          */
-        private ForkJoinTask localPop() {
+        private ForkJoinTask nextLocalTask(int fifo) {
             ForkJoinTask t = null;
-            int s = top - 1, cap; long k; ForkJoinTask[] a;
-            if ((a = array) != null && (cap = a.length) > 0 &&
-                U.getReference(a, k = slotOffset((cap - 1) & s)) != null &&
-                (t = (ForkJoinTask)U.getAndSetReference(a, k, null)) != null)
-                updateTop(s);
-            return t;
-        }
-
-        /**
-         * Takes next task, if one exists, in fifo order.
-         */
-        private ForkJoinTask localPoll() {
-            ForkJoinTask t = null;
-            int p = top, cap; ForkJoinTask[] a;
-            if ((a = array) != null && (cap = a.length) > 0) {
-                for (int b = base; p - b > 0; ) {
-                    int nb = b + 1;
-                    long k = slotOffset((cap - 1) & b);
-                    if (U.getReference(a, k) == null) {
-                        if (nb == p)
-                            break;          // else base is lagging
-                        while (b == (b = U.getIntAcquire(this, BASE)))
-                            Thread.onSpinWait(); // spin to reduce memory traffic
+            ForkJoinTask[] a = array;
+            int b = base, p = top, cap;
+            if (p - b > 0 && a != null && (cap = a.length) > 0) {
+                for (int m = cap - 1, s, nb;;) {
+                    if (fifo == 0 || (nb = b + 1) == p) {
+                        if ((t = (ForkJoinTask)U.getAndSetReference(
+                                 a, slotOffset(m & (s = p - 1)), null)) != null)
+                            updateTop(s);       // else lost race for only task
+                        break;
                     }
-                    else if ((t = (ForkJoinTask)
-                              U.getAndSetReference(a, k, null)) != null) {
+                    if ((t = (ForkJoinTask)U.getAndSetReference(
+                             a, slotOffset(m & b), null)) != null) {
                         updateBase(nb);
                         break;
                     }
-                    else
-                        b = base;
+                    while (b == (b = U.getIntAcquire(this, BASE)))
+                        Thread.onSpinWait();    // spin to reduce memory traffic
+                    if (p - b <= 0)
+                        break;
                 }
             }
             return t;
@@ -1351,9 +1364,10 @@ public class ForkJoinPool extends AbstractExecutorService
 
         /**
          * Takes next task, if one exists, using configured mode.
+         * (Always internal, never called for Common pool.)
          */
         final ForkJoinTask nextLocalTask() {
-            return (config & FIFO) == 0 ? localPop() : localPoll();
+            return nextLocalTask(config & FIFO);
         }
 
         /**
@@ -1429,12 +1443,12 @@ public class ForkJoinPool extends AbstractExecutorService
         // specialized execution methods
 
         /**
-         * Runs the given task, as well as remaining local tasks
+         * Runs the given task, as well as remaining local tasks.
          */
         final void topLevelExec(ForkJoinTask task, int fifo) {
             while (task != null) {
                 task.doExec();
-                task = (fifo != 0) ? localPoll() : localPop();
+                task = nextLocalTask(fifo);
             }
         }
 
@@ -1564,7 +1578,7 @@ public class ForkJoinPool extends AbstractExecutorService
          * Cancels all local tasks. Called only by owner.
          */
         final void cancelTasks() {
-            for (ForkJoinTask t; (t = localPop()) != null; ) {
+            for (ForkJoinTask t; (t = nextLocalTask(0)) != null; ) {
                 try {
                     t.cancel(false);
                 } catch (Throwable ignore) {
@@ -1766,8 +1780,7 @@ public class ForkJoinPool extends AbstractExecutorService
      * @param w caller's WorkQueue
      */
     final void registerWorker(WorkQueue w) {
-        if (w != null) {
-            w.array = new ForkJoinTask[INITIAL_QUEUE_CAPACITY];
+        if (w != null && (runState & STOP) == 0L) {
             ThreadLocalRandom.localInit();
             int seed = w.stackPred = ThreadLocalRandom.getProbe();
             int phaseSeq = seed & ~((IDLE << 1) - 1); // initial phase tag
@@ -1845,18 +1858,17 @@ public class ForkJoinPool extends AbstractExecutorService
         }
         if ((tryTerminate(false, false) & STOP) == 0L &&
             phase != 0 && w != null && w.source != DROPPED) {
+            signalWork();                  // possibly replace
             w.cancelTasks();               // clean queue
-            signalWork(null, 0);           // possibly replace
         }
         if (ex != null)
             ForkJoinTask.rethrow(ex);
     }
 
     /**
-     * Releases an idle worker, or creates one if not enough exist,
-     * giving up if array a is nonnull and task at a[k] already taken.
+     * Releases an idle worker, or creates one if not enough exist.
      */
-    final void signalWork(ForkJoinTask[] a, int k) {
+    final void signalWork() {
         int pc = parallelism;
         for (long c = ctl;;) {
             WorkQueue[] qs = queues;
@@ -1872,15 +1884,13 @@ public class ForkJoinPool extends AbstractExecutorService
             if (sp == 0) {
                 if ((short)(c >>> TC_SHIFT) >= pc)
                     break;
-                nc = ((c + TC_UNIT) & TC_MASK) | ac;
+                nc = ((c + TC_UNIT) & TC_MASK);
             }
             else if ((v = w) == null)
                 break;
             else
-                nc = (v.stackPred & LMASK) | (c & TC_MASK) | ac;
-            if (a != null && k < a.length && k >= 0 && a[k] == null)
-                break;
-            if (c == (c = ctl) && c == (c = compareAndExchangeCtl(c, nc))) {
+                nc = (v.stackPred & LMASK) | (c & TC_MASK);
+            if (c == (c = compareAndExchangeCtl(c, nc | ac))) {
                 if (v == null)
                     createWorker();
                 else {
@@ -1963,196 +1973,178 @@ public class ForkJoinPool extends AbstractExecutorService
      * @param w caller's WorkQueue (may be null on failed initialization)
      */
     final void runWorker(WorkQueue w) {
-        if (w != null && w.phase != 0) {                  // else unregistered
-            WorkQueue[] qs;
-            int r = w.stackPred;                          // seed from registerWorker
-            int fifo = (int)config & FIFO, rescans = 0, inactive = 0, taken = 0, n;
-            while ((runState & STOP) == 0L && (qs = queues) != null &&
-                   (n = qs.length) > 0) {
-                int i = r, step = (r >>> 16) | 1;
+        if (w != null) {
+            int phase = w.phase, r = w.stackPred;     // seed from registerWorker
+            int fifo = w.config & FIFO, nsteals = 0, src = -1;
+            for (;;) {
+                WorkQueue[] qs;
                 r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
-                scan: for (int j = n; j != 0; --j, i += step) {
-                    WorkQueue q; int qid;
-                    if ((q = qs[qid = i & (n - 1)]) != null) {
-                        ForkJoinTask[] a; int cap;     // poll queue
-                        while ((a = q.array) != null && (cap = a.length) > 0) {
-                            int b, nb, nk; long bp; ForkJoinTask t;
+                if ((runState & STOP) != 0L || (qs = queues) == null)
+                    break;
+                int n = qs.length, i = r, step = (r >>> 16) | 1;
+                boolean rescan = false;
+                scan: for (int l = n; l > 0; --l, i += step) {  // scan queues
+                    int j, cap; WorkQueue q; ForkJoinTask[] a;
+                    if ((q = qs[j = i & (n - 1)]) != null &&
+                        (a = q.array) != null && (cap = a.length) > 0) {
+                        for (int m = cap - 1, pb = -1, b = q.base;;) {
+                            ForkJoinTask t; long k;
                             t = (ForkJoinTask)U.getReferenceAcquire(
-                                a, bp = slotOffset((cap - 1) & (b = q.base)));
-                            long np = slotOffset(nk = (nb = b + 1) & (cap - 1));
-                            if (q.base == b) {            // else inconsistent
-                                if (t == null) {
-                                    if (q.array == a) {   // else resized
-                                        if (rescans > 0)  // ran or stalled
-                                            break scan;
-                                        if (U.getReference(a, np) == null &&
-                                            (rescans >= 0 ||
-                                             (U.getReferenceAcquire(a, bp) == null &&
-                                              q.top == q.base)))
-                                            break;
-                                        rescans = 1;      // may be stalled
+                                a, k = slotOffset(m & b));
+                            if (b != (b = q.base) || t == null ||
+                                !U.compareAndSetReference(a, k, t, null)) {
+                                if (a[b & m] == null) {
+                                    if (rescan)           // end of run
+                                        break scan;
+                                    if (a[(b + 1) & m] == null &&
+                                        a[(b + 2) & m] == null) {
+                                        break;            // probably empty
                                     }
-                                }
-                                else if (inactive != 0) {
-                                    if ((inactive = tryReactivate(w)) != 0) {
-                                        rescans = 1;      // can't take yet
+                                    if (pb == (pb = b)) { // track progress
+                                        rescan = true;    // stalled; reorder scan
                                         break scan;
                                     }
                                 }
-                                else if (U.compareAndSetReference(a, bp, t, null)) {
-                                    q.base = nb;
-                                    Object nt = U.getReferenceAcquire(a, np);
-                                    w.source = qid;
-                                    rescans = 1;
-                                    ++taken;
-                                    if (nt != null &&     // confirm a[nk]
-                                        U.getReferenceAcquire(a, np) == nt)
-                                        signalWork(a, nk); // propagate
-                                    w.topLevelExec(t, fifo);
-                                }
+                            }
+                            else {
+                                boolean propagate;
+                                int nb = q.base = b + 1, prevSrc = src;
+                                w.nsteals = ++nsteals;
+                                w.source = src = j;       // volatile
+                                rescan = true;
+                                int nh = t.noUserHelp();
+                                if (propagate =
+                                    (prevSrc != src || nh != 0) && a[nb & m] != null)
+                                    signalWork();
+                                w.topLevelExec(t, fifo);
+                                if ((b = q.base) != nb && !propagate)
+                                    break scan;          // reduce interference
                             }
                         }
                     }
                 }
-                if (rescans >= 0)
-                    --rescans;
-                else if (inactive == 0) {
-                    if ((inactive = deactivate(w, taken)) != 0)
-                        taken = 0;
+                if (!rescan) {
+                    if (((phase = deactivate(w, phase)) & IDLE) != 0)
+                        break;
+                    src = -1;                            // re-enable propagation
                 }
-                else if (awaitWork(w) == 0)
-                    inactive = rescans = 0;
-                else
-                    break;
             }
         }
     }
 
     /**
-     * Tries to deactivate worker, keeping active on contention
+     * Deactivates and if necessary awaits signal or termination.
      *
-     * @param w the work queue
-     * @param taken number of stolen tasks since last deactivation
-     * @return nonzero if inactive
+     * @param w the worker
+     * @param phase current phase
+     * @return current phase, with IDLE set if worker should exit
      */
-    private int deactivate(WorkQueue w, int taken) {
-        int inactive = 0, phase;
-        if (w != null && (inactive = (phase = w.phase) & IDLE) == 0) {
-            long sp = (phase + (IDLE << 1)) & LMASK, pc, c;
-            w.phase = phase | IDLE;
-            w.stackPred = (int)(pc = ctl);    // set ctl stack link
-            if (!compareAndSetCtl(            // try to enqueue
-                    pc, c = ((pc - RC_UNIT) & UMASK) | sp))
-                w.phase = phase;              // back out on contention
-            else {
-                if (taken != 0) {
-                    w.nsteals += taken;
-                    if ((w.config & CLEAR_TLS) != 0 &&
-                        (Thread.currentThread() instanceof ForkJoinWorkerThread f))
-                        f.resetThreadLocals(); // (instanceof check always true)
-                }
-                if (((c & RC_MASK) == 0L && quiescent() > 0) || taken == 0)
-                    inactive = w.phase & IDLE; // check quiescent termination
-                else {                         // spin for approx 1 scan cost
-                    int tc = (short)(c >>> TC_SHIFT);
-                    int spins = Math.max((tc << 1) + tc, SPIN_WAITS);
-                    while ((inactive = w.phase & IDLE) != 0 && --spins != 0)
-                        Thread.onSpinWait();
-                }
-            }
-        }
-        return inactive;
-    }
+    private int deactivate(WorkQueue w, int phase) {
+        if (w == null)                        // currently impossible
+            return IDLE;
+        int p = phase | IDLE, activePhase = phase + (IDLE << 1);
+        long pc = ctl, qc = (activePhase & LMASK) | ((pc - RC_UNIT) & UMASK);
+        int sp = w.stackPred = (int)pc;       // set ctl stack link
+        w.phase = p;
+        if (!compareAndSetCtl(pc, qc))        // try to enqueue
+            return w.phase = phase;           // back out on possible signal
+        int ac = (short)(qc >>> RC_SHIFT), n; long e; WorkQueue[] qs;
+        if (((e = runState) & STOP) != 0L ||
+            ((e & SHUTDOWN) != 0L && ac == 0 && quiescent() > 0) ||
+            (qs = queues) == null || (n = qs.length) <= 0)
+            return IDLE;                      // terminating
 
-    /**
-     * Reactivates worker w if it is currently top of ctl stack
-     *
-     * @param w the work queue
-     * @return 0 if now active
-     */
-    private int tryReactivate(WorkQueue w) {
-        int inactive = 0;
-        if (w != null) {                         // always true; hoist checks
-            int sp = w.stackPred, phase, activePhase; long c;
-            if ((inactive = (phase = w.phase) & IDLE) != 0 &&
-                (int)(c = ctl) == (activePhase = phase + IDLE) &&
-                compareAndSetCtl(c, (sp & LMASK) | ((c + RC_UNIT) & UMASK))) {
-                w.phase = activePhase;
-                inactive = 0;
-            }
+        for (int prechecks = Math.min(ac, 2), // reactivation threshold
+             k = Math.max(n + (n << 1), SPIN_WAITS << 1);;) {
+            WorkQueue q; int cap; ForkJoinTask[] a; long c;
+            if (w.phase == activePhase)
+                return activePhase;
+            if (--k < 0)
+                return awaitWork(w, p);       // block, drop, or exit
+            if ((q = qs[k & (n - 1)]) == null)
+                Thread.onSpinWait();
+            else if ((a = q.array) != null && (cap = a.length) > 0 &&
+                     a[q.base & (cap - 1)] != null && --prechecks < 0 &&
+                     (int)(c = ctl) == activePhase &&
+                     compareAndSetCtl(c, (sp & LMASK) | ((c + RC_UNIT) & UMASK)))
+                return w.phase = activePhase; // reactivate
         }
-        return inactive;
     }
 
     /**
      * Awaits signal or termination.
      *
      * @param w the work queue
-     * @return 0 if now active
+     * @param p current phase (known to be idle)
+     * @return current phase, with IDLE set if worker should exit
      */
-    private int awaitWork(WorkQueue w) {
-        int inactive = 0, phase;
-        if (w != null) {                          // always true; hoist checks
-            long waitTime = (w.source == INVALID_ID) ? 0L : keepAlive;
-            if ((inactive = (phase = w.phase) & IDLE) != 0) {
+    private int awaitWork(WorkQueue w, int p) {
+        if (w != null) {
+            ForkJoinWorkerThread t; long deadline;
+            if ((w.config & CLEAR_TLS) != 0 && (t = w.owner) != null)
+                t.resetThreadLocals();          // clear before reactivate
+            if ((ctl & RC_MASK) > 0L)
+                deadline = 0L;
+            else if ((deadline =
+                      (((w.source != INVALID_ID) ? keepAlive : TIMEOUT_SLOP)) +
+                      System.currentTimeMillis()) == 0L)
+                deadline = 1L;                 // avoid zero
+            int activePhase = p + IDLE;
+            if ((p = w.phase) != activePhase && (runState & STOP) == 0L) {
                 LockSupport.setCurrentBlocker(this);
-                int activePhase = phase + IDLE;
-                for (long deadline = 0L;;) {
-                    Thread.interrupted();         // clear status
+                w.parking = 1;                 // enable unpark
+                while ((p = w.phase) != activePhase) {
+                    boolean trimmable = false; int trim;
+                    Thread.interrupted();      // clear status
                     if ((runState & STOP) != 0L)
                         break;
-                    boolean trimmable = false;    // use timed wait if trimmable
-                    long d = 0L, c;
-                    if (((c = ctl) & RC_MASK) == 0L && (int)c == activePhase) {
-                        long now = System.currentTimeMillis();
-                        if (deadline == 0L)
-                            deadline = waitTime + now;
-                        if (deadline - now <= TIMEOUT_SLOP) {
-                            if (tryTrim(w, c, activePhase))
-                                break;
-                            continue;             // lost race to trim
-                        }
-                        d = deadline;
-                        trimmable = true;
+                    if (deadline != 0L) {
+                        if ((trim = tryTrim(w, p, deadline)) > 0)
+                            break;
+                        else if (trim < 0)
+                            deadline = 0L;
+                        else
+                            trimmable = true;
                     }
-                    w.parking = 1;                // enable unpark and recheck
-                    if ((inactive = w.phase & IDLE) != 0)
-                        U.park(trimmable, d);
-                    w.parking = 0;                // close unpark window
-                    if (inactive == 0 || (inactive = w.phase & IDLE) == 0)
-                        break;
+                    U.park(trimmable, deadline);
                 }
+                w.parking = 0;
                 LockSupport.setCurrentBlocker(null);
             }
         }
-        return inactive;
+        return p;
     }
 
     /**
      * Tries to remove and deregister worker after timeout, and release
-     * another to do the same unless new tasks are found.
+     * another to do the same.
+     * @return > 0: trimmed, < 0 : not trimmable, else 0
      */
-    private boolean tryTrim(WorkQueue w, long c, int activePhase) {
-        if (w != null) {
-            int vp, i; WorkQueue[] vs; WorkQueue v;
-            long nc = ((w.stackPred & LMASK) |
-                       ((RC_MASK & c) | (TC_MASK & (c - TC_UNIT))));
-            if (compareAndSetCtl(c, nc)) {
-                w.source = DROPPED;
-                w.phase = activePhase;
-                if ((vp = (int)nc) != 0 && (vs = queues) != null &&
-                    vs.length > (i = vp & SMASK) && (v = vs[i]) != null &&
-                    compareAndSetCtl(           // try to wake up next waiter
-                        nc, ((v.stackPred & LMASK) |
-                             ((UMASK & (nc + RC_UNIT)) | (nc & TC_MASK))))) {
-                    v.source = INVALID_ID;      // enable cascaded timeouts
-                    v.phase = vp;
-                    U.unpark(v.owner);
-                }
-                return true;
+    private int tryTrim(WorkQueue w, int phase, long deadline) {
+        long c, nc; int stat, activePhase, vp, i; WorkQueue[] vs; WorkQueue v;
+        if ((activePhase = phase + IDLE) != (int)(c = ctl) || w == null)
+            stat = -1;                      // no longer ctl top
+        else if (deadline - System.currentTimeMillis() >= TIMEOUT_SLOP)
+            stat = 0;                       // spurious wakeup
+        else if (!compareAndSetCtl(
+                     c, nc = ((w.stackPred & LMASK) | (RC_MASK & c) |
+                               (TC_MASK & (c - TC_UNIT)))))
+            stat = -1;                      // lost race to signaller
+        else {
+            stat = 1;
+            w.source = DROPPED;
+            w.phase = activePhase;
+            if ((vp = (int)nc) != 0 && (vs = queues) != null &&
+                vs.length > (i = vp & SMASK) && (v = vs[i]) != null &&
+                compareAndSetCtl(           // try to wake up next waiter
+                    nc, ((UMASK & (nc + RC_UNIT)) |
+                         (nc & TC_MASK) | (v.stackPred & LMASK)))) {
+                v.source = INVALID_ID;      // enable cascaded timeouts
+                v.phase = vp;
+                U.unpark(v.owner);
             }
         }
-        return false;
+        return stat;
     }
 
     /**
@@ -2569,35 +2561,52 @@ public class ForkJoinPool extends AbstractExecutorService
 
     /**
      * Finds and locks a WorkQueue for an external submitter, or
-     * throws RejectedExecutionException if shutdown
+     * throws RejectedExecutionException if shutdown or terminating.
+     * @param r current ThreadLocalRandom.getProbe() value
      * @param rejectOnShutdown true if RejectedExecutionException
-     *        should be thrown when shutdown
+     *        should be thrown when shutdown (else only if terminating)
      */
-    final WorkQueue externalSubmissionQueue(boolean rejectOnShutdown) {
-        int r;
-        if ((r = ThreadLocalRandom.getProbe()) == 0) {
-            ThreadLocalRandom.localInit();   // initialize caller's probe
+    private WorkQueue submissionQueue(int r, boolean rejectOnShutdown) {
+        int reuse;                                   // nonzero if prefer create
+        if ((reuse = r) == 0) {
+            ThreadLocalRandom.localInit();           // initialize caller's probe
             r = ThreadLocalRandom.getProbe();
         }
-        for (;;) {
-            WorkQueue q; WorkQueue[] qs; int n, id, i;
-            if ((qs = queues) == null || (n = qs.length) <= 0)
+        for (int probes = 0; ; ++probes) {
+            int n, i, id; WorkQueue[] qs; WorkQueue q;
+            if ((qs = queues) == null)
+                break;
+            if ((n = qs.length) <= 0)
                 break;
             if ((q = qs[i = (id = r & EXTERNAL_ID_MASK) & (n - 1)]) == null) {
-                WorkQueue newq = new WorkQueue(null, id, 0, false);
-                lockRunState();
-                if (qs[i] == null && queues == qs)
-                    q = qs[i] = newq;         // else lost race to install
+                WorkQueue w = new WorkQueue(null, id, 0, false);
+                w.phase = id;
+                boolean reject = ((lockRunState() & SHUTDOWN) != 0 &&
+                                  rejectOnShutdown);
+                if (!reject && queues == qs && qs[i] == null)
+                    q = qs[i] = w;                   // else lost race to install
                 unlockRunState();
-            }
-            if (q != null && q.tryLockPhase()) {
-                if (rejectOnShutdown && (runState & SHUTDOWN) != 0L) {
-                    q.unlockPhase();          // check while q lock held
+                if (q != null)
+                    return q;
+                if (reject)
                     break;
-                }
-                return q;
+                reuse = 0;
             }
-            r = ThreadLocalRandom.advanceProbe(r); // move
+            if (reuse == 0 || !q.tryLockPhase()) {   // move index
+                if (reuse == 0) {
+                    if (probes >= n >> 1)
+                        reuse = r;                   // stop preferring free slot
+                }
+                else if (q != null)
+                    reuse = 0;                       // probe on collision
+                r = ThreadLocalRandom.advanceProbe(r);
+            }
+            else if (rejectOnShutdown && (runState & SHUTDOWN) != 0L) {
+                q.unlockPhase();                     // check while q lock held
+                break;
+            }
+            else
+                return q;
         }
         throw new RejectedExecutionException();
     }
@@ -2611,12 +2620,24 @@ public class ForkJoinPool extends AbstractExecutorService
         }
         else {                     // find and lock queue
             internal = false;
-            q = externalSubmissionQueue(true);
+            q = submissionQueue(ThreadLocalRandom.getProbe(), true);
         }
         q.push(task, signalIfEmpty ? this : null, internal);
         return task;
     }
 
+    /**
+     * Returns queue for an external submission, bypassing call to
+     * submissionQueue if already established and unlocked.
+     */
+    final WorkQueue externalSubmissionQueue(boolean rejectOnShutdown) {
+        WorkQueue[] qs; WorkQueue q; int n;
+        int r = ThreadLocalRandom.getProbe();
+        return (((qs = queues) != null && (n = qs.length) > 0 &&
+                 (q = qs[r & EXTERNAL_ID_MASK & (n - 1)]) != null && r != 0 &&
+                 q.tryLockPhase()) ? q : submissionQueue(r, rejectOnShutdown));
+    }
+
     /**
      * Returns queue for an external thread, if one exists that has
      * possibly ever submitted to the given pool (nonzero probe), or
@@ -3295,7 +3316,7 @@ public class ForkJoinPool extends AbstractExecutorService
         if ((config & PRESET_SIZE) != 0)
             throw new UnsupportedOperationException("Cannot override System property");
         if ((prevSize = getAndSetParallelism(size)) < size)
-            signalWork(null, 0); // trigger worker activation
+            signalWork(); // trigger worker activation
         return prevSize;
     }
 
diff --git a/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java b/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java
index 5e7b61e6c57..abb9f3aca38 100644
--- a/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java
+++ b/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,7 +166,7 @@ import java.util.Locale;
  * Common Locale Data Repository (CLDR)
  * to implement locale-sensitive APIs in the {@code java.util} and
  * {@code java.text} packages. This locale data derives the set of locales
- * supported by the Java runtime environment. The following table lists the
+ * supported by the Java runtime environment. The following tables list the
  * version of CLDR used in each JDK release. Unless otherwise specified, all
  * update releases in a given JDK release family use the same CLDR version.
  * Note that the CLDR locale data are subject to change. Users should not assume
@@ -175,6 +175,9 @@ import java.util.Locale;
  * Refer to CLDR Releases
  * for the deltas between their releases.
  * 
+ * 
  * 
  * 
  * 
@@ -185,22 +188,38 @@ import java.util.Locale;
  *     
  * 
  *     
+ * 
+ *     
+ * 
+ *     
+ * 
+ *     
+ * 
+ *     
+ * 
+ * 
JDK releases and supported CLDR versions
JDK releaseCLDR 48
JDK 25CLDR 47
JDK 21CLDR 43
JDK 17CLDR 39
JDK 11CLDR 33
JDK 8CLDR 21.0.1
+ *
+ * Show other JDK releases + * + * + * + * + * + * + * * * * * * * - * - * * * * * * * - * - * * * * @@ -211,16 +230,13 @@ import java.util.Locale; * * * - * - * * * * * - * - * * *
Other JDK releases and supported CLDR + * versions
JDK releaseCLDR version
JDK 24CLDR 46
JDK 23CLDR 45
JDK 22CLDR 44
JDK 21CLDR 43
JDK 20CLDR 42
JDK 19CLDR 41
JDK 18CLDR 39
JDK 17CLDR 39
JDK 16CLDR 38
JDK 15CLDR 35.1
JDK 12CLDR 33
JDK 11CLDR 33
JDK 10CLDR 29
JDK 9CLDR 29
JDK 8CLDR 21.0.1
+ *
* * @since 1.6 */ diff --git a/src/java.base/share/classes/java/util/stream/Stream.java b/src/java.base/share/classes/java/util/stream/Stream.java index 1dd13133fe1..645f4f033b7 100644 --- a/src/java.base/share/classes/java/util/stream/Stream.java +++ b/src/java.base/share/classes/java/util/stream/Stream.java @@ -1002,8 +1002,8 @@ public interface Stream extends BaseStream> { /** * Performs a reduction on the - * elements of this stream, using the provided identity, accumulation and - * combining functions. This is equivalent to: + * elements of this stream using the provided identity value, accumulation + * function, and combining function. This is equivalent to: *
{@code
      *     U result = identity;
      *     for (T element : this stream)
diff --git a/src/java.base/share/classes/java/util/zip/GZIPInputStream.java b/src/java.base/share/classes/java/util/zip/GZIPInputStream.java
index ebcb9e3204c..72fb8036f08 100644
--- a/src/java.base/share/classes/java/util/zip/GZIPInputStream.java
+++ b/src/java.base/share/classes/java/util/zip/GZIPInputStream.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,11 +79,7 @@ public class GZIPInputStream extends InflaterInputStream {
         super(in, createInflater(in, size), size);
         usesDefaultInflater = true;
         try {
-            // we don't expect the stream to be at EOF
-            // and if it is, then we want readHeader to
-            // raise an exception, so we pass "true" for
-            // the "failOnEOF" param.
-            readHeader(in, true);
+            readHeader(in);
         } catch (IOException ioe) {
             this.inf.end();
             throw ioe;
@@ -194,40 +190,12 @@ public class GZIPInputStream extends InflaterInputStream {
     /*
      * Reads GZIP member header and returns the total byte number
      * of this member header.
-     * If failOnEOF is false and if the given InputStream has already
-     * reached EOF when this method was invoked, then this method returns
-     * -1 (indicating that there's no GZIP member header).
-     * In all other cases of malformed header or EOF being detected
-     * when reading the header, this method will throw an IOException.
      */
-    private int readHeader(InputStream this_in, boolean failOnEOF) throws IOException {
+    private int readHeader(InputStream this_in) throws IOException {
         CheckedInputStream in = new CheckedInputStream(this_in, crc);
         crc.reset();
-
-        int magic;
-        if (!failOnEOF) {
-            // read an unsigned short value representing the GZIP magic header.
-            // this is the same as calling readUShort(in), except that here,
-            // when reading the first byte, we don't raise an EOFException
-            // if the stream has already reached EOF.
-
-            // read unsigned byte
-            int b = in.read();
-            if (b == -1) { // EOF
-                crc.reset();
-                return -1; // represents no header bytes available
-            }
-            checkUnexpectedByte(b);
-            // read the next unsigned byte to form the unsigned
-            // short. we throw the usual EOFException/ZipException
-            // from this point on if there is no more data or
-            // the data doesn't represent a header.
-            magic = (readUByte(in) << 8) | b;
-        } else {
-            magic = readUShort(in);
-        }
         // Check header magic
-        if (magic != GZIP_MAGIC) {
+        if (readUShort(in) != GZIP_MAGIC) {
             throw new ZipException("Not in GZIP format");
         }
         // Check compression method
@@ -290,21 +258,23 @@ public class GZIPInputStream extends InflaterInputStream {
             (readUInt(in) != (inf.getBytesWritten() & 0xffffffffL)))
             throw new ZipException("Corrupt GZIP trailer");
 
+        // If there are more bytes available in "in" or
+        // the leftover in the "inf" is > 26 bytes:
+        // this.trailer(8) + next.header.min(10) + next.trailer(8)
         // try concatenated case
-        int m = 8;                  // this.trailer
-        try {
-            int numNextHeaderBytes = readHeader(in, false); // next.header (if available)
-            if (numNextHeaderBytes == -1) {
-                return true; // end of stream reached
+        if (this.in.available() > 0 || n > 26) {
+            int m = 8;                  // this.trailer
+            try {
+                m += readHeader(in);    // next.header
+            } catch (IOException ze) {
+                return true;  // ignore any malformed, do nothing
             }
-            m += numNextHeaderBytes;
-        } catch (IOException ze) {
-            return true;  // ignore any malformed, do nothing
+            inf.reset();
+            if (n > m)
+                inf.setInput(buf, len - n + m, n - m);
+            return false;
         }
-        inf.reset();
-        if (n > m)
-            inf.setInput(buf, len - n + m, n - m);
-        return false;
+        return true;
     }
 
     /*
@@ -331,16 +301,12 @@ public class GZIPInputStream extends InflaterInputStream {
         if (b == -1) {
             throw new EOFException();
         }
-        checkUnexpectedByte(b);
-        return b;
-    }
-
-    private void checkUnexpectedByte(final int b) throws IOException {
         if (b < -1 || b > 255) {
-            // report the InputStream type which returned this unexpected byte
+            // Report on this.in, not argument in; see read{Header, Trailer}.
             throw new IOException(this.in.getClass().getName()
-                    + ".read() returned value out of range -1..255: " + b);
+                + ".read() returned value out of range -1..255: " + b);
         }
+        return b;
     }
 
     private byte[] tmpbuf = new byte[128];
diff --git a/src/java.base/share/classes/java/util/zip/InflaterOutputStream.java b/src/java.base/share/classes/java/util/zip/InflaterOutputStream.java
index 31c51509a76..abe4e069915 100644
--- a/src/java.base/share/classes/java/util/zip/InflaterOutputStream.java
+++ b/src/java.base/share/classes/java/util/zip/InflaterOutputStream.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,14 +31,14 @@ import java.io.OutputStream;
 import java.util.Objects;
 
 /**
- * Implements an output stream filter for uncompressing data stored in the
+ * Implements an output stream filter for decompressing data stored in the
  * "deflate" compression format.
  *
  * 

Decompressor Usage

* An {@code InflaterOutputStream} created without * specifying a {@linkplain Inflater decompressor} will create a decompressor * at construction time, and close the decompressor when the output stream - * is {@linkplain #close closed}. + * is {@linkplain #close closed} or when {@link #finish()} is called. *

* If a decompressor is specified when creating a {@code InflaterOutputStream}, it is the * responsibility of the caller to {@linkplain Inflater#close close} the @@ -49,7 +49,6 @@ import java.util.Objects; * stream, either directly, or with the {@code try}-with-resources statement. * * @since 1.6 - * @author David R Tribble (david@tribble.com) * * @see InflaterInputStream * @see DeflaterInputStream @@ -60,7 +59,7 @@ public class InflaterOutputStream extends FilterOutputStream { /** Decompressor for this stream. */ protected final Inflater inf; - /** Output buffer for writing uncompressed data. */ + /** Output buffer for writing decompressed data. */ protected final byte[] buf; /** Temporary write buffer. */ @@ -72,6 +71,10 @@ public class InflaterOutputStream extends FilterOutputStream { /** true iff {@link #close()} has been called. */ private boolean closed = false; + // set to true if finish() was called and this InflaterOutputStream + // had created its own Inflater at construction time. + private boolean defaultInflaterClosed; + /** * Checks to make sure that this stream has not been closed. */ @@ -88,7 +91,7 @@ public class InflaterOutputStream extends FilterOutputStream { * The decompressor will be closed when this output stream * is {@linkplain #close() closed}. * - * @param out output stream to write the uncompressed data to + * @param out output stream to write the decompressed data to * @throws NullPointerException if {@code out} is null */ public InflaterOutputStream(OutputStream out) { @@ -104,7 +107,7 @@ public class InflaterOutputStream extends FilterOutputStream { * {@linkplain ##decompressor-usage will not close} the given * {@linkplain Inflater decompressor}. 
* - * @param out output stream to write the uncompressed data to + * @param out output stream to write the decompressed data to * @param infl decompressor ("inflater") for this stream * @throws NullPointerException if {@code out} or {@code infl} is null */ @@ -120,7 +123,7 @@ public class InflaterOutputStream extends FilterOutputStream { * {@linkplain ##decompressor-usage will not close} the given * {@linkplain Inflater decompressor}. * - * @param out output stream to write the uncompressed data to + * @param out output stream to write the decompressed data to * @param infl decompressor ("inflater") for this stream * @param bufLen decompression buffer size * @throws IllegalArgumentException if {@code bufLen <= 0} @@ -143,27 +146,45 @@ public class InflaterOutputStream extends FilterOutputStream { } /** - * Writes any remaining uncompressed data to the output stream and closes + * Writes any remaining decompressed data to the output stream and closes * the underlying output stream. * + * @implSpec If not already closed, this method calls {@link #finish()} before + * closing the underlying output stream. + * * @throws IOException if an I/O error occurs */ @Override public void close() throws IOException { - if (!closed) { - // Complete the uncompressed output + if (closed) { + return; + } + IOException toThrow = null; + // Complete the decompressed output + try { + finish(); + } catch (IOException ioe) { + toThrow = ioe; + } finally { try { - finish(); - } finally { out.close(); - closed = true; + } catch (IOException ioe) { + if (toThrow == null) { + toThrow = ioe; + } else if (toThrow != ioe) { + toThrow.addSuppressed(ioe); + } } + closed = true; + } + if (toThrow != null) { + throw toThrow; } } /** - * Flushes this output stream, forcing any pending buffered output bytes to be - * written. + * Flushes this output stream, writing any pending buffered decompressed data to + * the underlying output stream. 
* * @throws IOException if an I/O error occurs or this stream is already * closed @@ -184,7 +205,7 @@ public class InflaterOutputStream extends FilterOutputStream { break; } - // Write the uncompressed output data block + // Write the decompressed output data block out.write(buf, 0, n); } super.flush(); @@ -200,12 +221,18 @@ public class InflaterOutputStream extends FilterOutputStream { } /** - * Finishes writing uncompressed data to the output stream without closing - * the underlying stream. Use this method when applying multiple filters in - * succession to the same output stream. + * Writes any pending buffered decompressed data to the underlying output stream, + * without closing the underlying stream. * - * @throws IOException if an I/O error occurs or this stream is already - * closed + * @implSpec This method calls {@link #flush()} to write any pending buffered + * decompressed data. + *

+ * If this {@code InflaterOutputStream} was created without specifying + * a {@linkplain Inflater decompressor}, then this method closes the decompressor + * that was created at construction time. The {@code InflaterOutputStream} cannot + * then be used for any further writes. + * + * @throws IOException if an I/O error occurs or this stream is already closed */ public void finish() throws IOException { ensureOpen(); @@ -214,11 +241,12 @@ public class InflaterOutputStream extends FilterOutputStream { flush(); if (usesDefaultInflater) { inf.end(); + this.defaultInflaterClosed = true; } } /** - * Writes a byte to the uncompressed output stream. + * Writes a byte to the decompressed output stream. * * @param b a single byte of compressed data to decompress and write to * the output stream @@ -234,7 +262,7 @@ public class InflaterOutputStream extends FilterOutputStream { } /** - * Writes an array of bytes to the uncompressed output stream. + * Writes an array of bytes to the decompressed output stream. * * @param b buffer containing compressed data to decompress and write to * the output stream @@ -251,15 +279,22 @@ public class InflaterOutputStream extends FilterOutputStream { public void write(byte[] b, int off, int len) throws IOException { // Sanity checks ensureOpen(); + // check if this InflaterOutputStream had constructed its own + // Inflater at construction time and has been + // rendered unusable for writes due to finish() being called + // on it. 
+ if (usesDefaultInflater && defaultInflaterClosed) { + throw new IOException("Inflater closed"); + } if (b == null) { - throw new NullPointerException("Null buffer for read"); + throw new NullPointerException("Null input buffer"); } Objects.checkFromIndexSize(off, len, b.length); if (len == 0) { return; } - // Write uncompressed data to the output stream + // Write decompressed data to the output stream try { for (;;) { int n; diff --git a/src/java.base/share/classes/java/util/zip/ZipEntry.java b/src/java.base/share/classes/java/util/zip/ZipEntry.java index bf0bf55ff98..0206d2a5154 100644 --- a/src/java.base/share/classes/java/util/zip/ZipEntry.java +++ b/src/java.base/share/classes/java/util/zip/ZipEntry.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -651,8 +651,9 @@ public class ZipEntry implements ZipConstants, Cloneable { } /** - * Sets the optional comment string for the entry. - * @param comment the comment string + * Sets the optional comment string for the entry. If {@code comment} is an + * empty string or {@code null} then the entry will have no comment. 
+ * @param comment the comment string, or an empty string or null for no comment * @throws IllegalArgumentException if the combined length * of the specified entry comment, the {@linkplain #getName() entry name}, * the {@linkplain #getExtra() extra field data}, and the diff --git a/src/java.base/share/classes/java/util/zip/ZipOutputStream.java b/src/java.base/share/classes/java/util/zip/ZipOutputStream.java index 47499858a37..d79b0a1bd9c 100644 --- a/src/java.base/share/classes/java/util/zip/ZipOutputStream.java +++ b/src/java.base/share/classes/java/util/zip/ZipOutputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,10 @@ import sun.nio.cs.UTF_8; *

Unless otherwise noted, passing a {@code null} argument to a constructor * or method in this class will cause a {@link NullPointerException} to be * thrown. + *

By default, the UTF-8 charset is used to encode entry names and comments. + * {@link #ZipOutputStream(OutputStream, Charset)} may be be used to specify + * an alternative charset. + * * @author David Connelly * @since 1.1 */ @@ -110,10 +114,8 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant public static final int DEFLATED = ZipEntry.DEFLATED; /** - * Creates a new ZIP output stream. - * - *

The UTF-8 {@link java.nio.charset.Charset charset} is used - * to encode the entry names and comments. + * Creates a new ZIP output stream using the UTF-8 + * {@link Charset charset} to encode entry names and comments. * * @param out the actual output stream */ @@ -122,12 +124,13 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant } /** - * Creates a new ZIP output stream. + * Creates a new ZIP output stream using the specified + * {@link Charset charset} to encode entry names and comments. * * @param out the actual output stream * * @param charset the {@linkplain java.nio.charset.Charset charset} - * to be used to encode the entry names and comments + * to be used to encode entry names and comments * * @since 1.7 */ @@ -140,10 +143,15 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant } /** - * Sets the ZIP file comment. - * @param comment the comment string - * @throws IllegalArgumentException if the length of the specified - * ZIP file comment is greater than 0xFFFF bytes + * Sets the ZIP file comment. If {@code comment} is an empty string or + * {@code null} then the output will have no ZIP file comment. 
+ * + * @param comment the comment string, or an empty string or null for no comment + * + * @throws IllegalArgumentException if the length of the specified ZIP file + * comment is greater than 0xFFFF bytes or if the {@code comment} + * contains characters that cannot be mapped by the {@code Charset} + * used to encode entry names and comments */ public void setComment(String comment) { byte[] bytes = null; @@ -257,6 +265,11 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant default: throw new ZipException("unsupported compression method"); } + // Verify that entry name and comment can be encoded + byte[] nameBytes = checkEncodable(e.name, "unmappable character in ZIP entry name"); + if (e.comment != null) { + checkEncodable(e.comment, "unmappable character in ZIP entry comment"); + } if (! names.add(e.name)) { throw new ZipException("duplicate entry: " + e.name); } @@ -270,7 +283,16 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant } current = new XEntry(e, written); xentries.add(current); - writeLOC(current); + writeLOC(current, nameBytes); + } + + // Throws ZipException if the given string cannot be encoded + private byte[] checkEncodable(String str, String msg) throws ZipException { + try { + return zc.getBytes(str); + } catch (IllegalArgumentException ex) { + throw (ZipException) new ZipException(msg).initCause(ex); + } } /** @@ -424,7 +446,7 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant /* * Writes local file (LOC) header for specified entry. 
*/ - private void writeLOC(XEntry xentry) throws IOException { + private void writeLOC(XEntry xentry, byte[] nameBytes) throws IOException { ZipEntry e = xentry.entry; int flag = e.flag; boolean hasZip64 = false; @@ -461,7 +483,6 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant writeInt(e.size); // uncompressed size } } - byte[] nameBytes = zc.getBytes(e.name); writeShort(nameBytes.length); int elenEXTT = 0; // info-zip extended timestamp diff --git a/src/java.base/share/classes/jdk/internal/access/JavaSecurityPropertiesAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaSecurityPropertiesAccess.java index a4875f357e3..2d9dbea052a 100644 --- a/src/java.base/share/classes/jdk/internal/access/JavaSecurityPropertiesAccess.java +++ b/src/java.base/share/classes/jdk/internal/access/JavaSecurityPropertiesAccess.java @@ -29,4 +29,5 @@ import java.util.Properties; public interface JavaSecurityPropertiesAccess { Properties getInitialProperties(); + Properties getCurrentProperties(); } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java index 0e82c545359..79c623bc31d 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java @@ -168,7 +168,6 @@ public final class DirectClassBuilder this.sizeHint = sizeHint; } - public byte[] build() { // The logic of this is very carefully ordered. 
We want to avoid diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerificationTable.java b/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerificationTable.java index 04276b8eeb8..eb3f5ee913d 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerificationTable.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerificationTable.java @@ -321,7 +321,7 @@ class VerificationTable { return frame; } int offset_delta = _stream.get_u2(); - if (frame_type < SAME_LOCALS_1_STACK_ITEM_EXTENDED) { + if (frame_type <= RESERVED_END) { _verifier.classError("reserved frame type"); } if (frame_type == SAME_LOCALS_1_STACK_ITEM_EXTENDED) { diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerifierImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerifierImpl.java index 07406b2ee7f..adc595813ee 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerifierImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/VerifierImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1047,13 +1047,11 @@ public final class VerifierImpl { no_control_flow = false; break; case IF_ACMPEQ : case IF_ACMPNE : - current_frame.pop_stack( - VerificationType.reference_check); + current_frame.pop_stack(object_type()); // fall through case IFNULL : case IFNONNULL : - current_frame.pop_stack( - VerificationType.reference_check); + current_frame.pop_stack(object_type()); target = bcs.dest(); stackmap_table.check_jump_target (current_frame, target); diff --git a/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java b/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java index 5942cefa2a1..a698440c15d 100644 --- a/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java +++ b/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,25 +64,6 @@ public @interface PreviewFeature { * Values should be annotated with the feature's {@code JEP}. */ public enum Feature { - // The JDK build process involves creating an interim javac which is then - // used to compile the rest of the JDK. The jdk.internal.javac.PreviewFeature - // annotation from the current sources is used when compiling interim javac. - // That's because the javac APIs of the current sources may be annotated with - // this annotation and they may be using the enum constants of the current sources. - // Furthermore, when compiling interim javac, the class files from the bootstrap JDK get - // used and those may also contain the PreviewFeature annotation. However, they may be - // using the enum constants of the bootstrap JDK's PreviewFeature annotation. 
- // If javac sees an annotation with an unknown enum constant, it produces a warning, - // and that in turn fails the build. - // So, in the current sources, we need to preserve the PreviewFeature enum constants - // for as long as the interim javac build needs it. As a result, we retain PreviewFeature - // enum constants for preview features that are present in the bootstrap JDK. - // Older constants can be removed. - // - // For example, Class-File API became final in JDK 24. As soon as JDK 23 was dropped as - // the bootstrap JDK, the CLASSFILE_API enum constant became eligible for removal. - - //--- @JEP(number=525, title="Structured Concurrency", status="Sixth Preview") STRUCTURED_CONCURRENCY, @JEP(number = 526, title = "Lazy Constants", status = "Second Preview") diff --git a/src/java.base/share/classes/jdk/internal/jimage/ImageReader.java b/src/java.base/share/classes/jdk/internal/jimage/ImageReader.java index c36e265ee2f..4c358820166 100644 --- a/src/java.base/share/classes/jdk/internal/jimage/ImageReader.java +++ b/src/java.base/share/classes/jdk/internal/jimage/ImageReader.java @@ -821,6 +821,7 @@ public final class ImageReader implements AutoCloseable { this.children = Collections.unmodifiableList(children); } } + /** * Resource node (e.g. a ".class" entry, or any other data resource). * diff --git a/src/java.base/share/classes/jdk/internal/lang/CaseFolding.java.template b/src/java.base/share/classes/jdk/internal/lang/CaseFolding.java.template index 24a183c8da0..24f48151f21 100644 --- a/src/java.base/share/classes/jdk/internal/lang/CaseFolding.java.template +++ b/src/java.base/share/classes/jdk/internal/lang/CaseFolding.java.template @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -188,6 +188,12 @@ public final class CaseFolding { } private static long getDefined(int cp) { + // Exclude code point U+0000, which is guaranteed to have no + // case-folding mapping. + if (cp == 0) { + return -1; + } + var hashes = CASE_FOLDING_HASHES; var length = CASE_FOLDING_CPS.length; // hashed based on total defined. var hash = cp % length; diff --git a/src/java.base/share/classes/jdk/internal/misc/MethodFinder.java b/src/java.base/share/classes/jdk/internal/misc/MethodFinder.java index 60895b8115a..1ee608f2caf 100644 --- a/src/java.base/share/classes/jdk/internal/misc/MethodFinder.java +++ b/src/java.base/share/classes/jdk/internal/misc/MethodFinder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ package jdk.internal.misc; import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.util.Objects; import jdk.internal.access.JavaLangAccess; import jdk.internal.access.SharedSecrets; @@ -88,21 +89,27 @@ public class MethodFinder { mainMethod = JLA.findMethod(cls, false, "main", String[].class); } - if (mainMethod == null || !isValidMainMethod(mainMethod)) { + if (mainMethod == null || !isValidMainMethod(cls, mainMethod)) { mainMethod = JLA.findMethod(cls, false, "main"); } - if (mainMethod == null || !isValidMainMethod(mainMethod)) { + if (mainMethod == null || !isValidMainMethod(cls, mainMethod)) { return null; } return mainMethod; } - private static boolean isValidMainMethod(Method mainMethodCandidate) { + private static boolean isValidMainMethod(Class initialClass, Method mainMethodCandidate) { return mainMethodCandidate.getReturnType() == void.class && - 
!Modifier.isPrivate(mainMethodCandidate.getModifiers()); - + !Modifier.isPrivate(mainMethodCandidate.getModifiers()) && + (Modifier.isPublic(mainMethodCandidate.getModifiers()) || + Modifier.isProtected(mainMethodCandidate.getModifiers()) || + isInSameRuntimePackage(initialClass, mainMethodCandidate.getDeclaringClass())); } + private static boolean isInSameRuntimePackage(Class c1, Class c2) { + return Objects.equals(c1.getPackageName(), c2.getPackageName()) && + c1.getClassLoader() == c2.getClassLoader(); + } } diff --git a/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java b/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java index c220455e80b..3bd2486fa39 100644 --- a/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java +++ b/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java @@ -478,7 +478,7 @@ public class ArraysSupport { // Bytes /** - * Find the index of a mismatch between two arrays. + * Find the smallest index of a mismatch between two arrays. * *

This method does not perform bounds checks. It is the responsibility * of the caller to perform such bounds checks before calling this method. @@ -486,9 +486,9 @@ public class ArraysSupport { * @param a the first array to be tested for a mismatch * @param b the second array to be tested for a mismatch * @param length the number of bytes from each array to check - * @return the index of a mismatch between the two arrays, otherwise -1 if - * no mismatch. The index will be within the range of (inclusive) 0 to - * (exclusive) the smaller of the two array lengths. + * @return the smallest index of a mismatch between the two arrays, + * otherwise -1 if no mismatch. The index will be within the range of + * (inclusive) 0 to (exclusive) the smaller of the two array lengths. */ public static int mismatch(byte[] a, byte[] b, @@ -520,8 +520,8 @@ public class ArraysSupport { } /** - * Find the relative index of a mismatch between two arrays starting from - * given indexes. + * Find the smallest relative index of a mismatch between two arrays + * starting from given indexes. * *

This method does not perform bounds checks. It is the responsibility * of the caller to perform such bounds checks before calling this method. @@ -533,7 +533,7 @@ public class ArraysSupport { * @param bFromIndex the index of the first element (inclusive) in the * second array to be compared * @param length the number of bytes from each array to check - * @return the relative index of a mismatch between the two arrays, + * @return the smallest relative index of a mismatch between the two arrays, * otherwise -1 if no mismatch. The index will be within the range of * (inclusive) 0 to (exclusive) the smaller of the two array bounds. */ diff --git a/src/java.base/share/classes/jdk/internal/vm/ThreadDumper.java b/src/java.base/share/classes/jdk/internal/vm/ThreadDumper.java index 276c379a564..fa7d4bab076 100644 --- a/src/java.base/share/classes/jdk/internal/vm/ThreadDumper.java +++ b/src/java.base/share/classes/jdk/internal/vm/ThreadDumper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -250,18 +250,43 @@ public class ThreadDumper { } } + /** + * JSON is schema-less and the thread dump format will evolve over time. + * {@code HotSpotDiagnosticMXBean.dumpThreads} links to a JSON file that documents + * the latest/current format. A system property can be used to generate the thread + * dump in older formats if necessary. 
+ */ + private static class JsonFormat { + private static final String JSON_FORMAT_VERSION_PROP = + "com.sun.management.HotSpotDiagnosticMXBean.dumpThreads.format"; + static final int JSON_FORMAT_V1 = 1; + static final int JSON_FORMAT_V2 = 2; + private static final int JSON_FORMAT_LATEST = JSON_FORMAT_V2; + private static final int JSON_FORMAT; + static { + int ver = Integer.getInteger(JSON_FORMAT_VERSION_PROP, JSON_FORMAT_LATEST); + JSON_FORMAT = Math.clamp(ver, JSON_FORMAT_V1, JSON_FORMAT_LATEST); + } + + static int formatVersion() { + return JSON_FORMAT; + } + } + /** * Generate a thread dump to the given text stream in JSON format. * @throws UncheckedIOException if an I/O error occurs */ private static void dumpThreadsToJson(TextWriter textWriter) { - var jsonWriter = new JsonWriter(textWriter); - + int format = JsonFormat.formatVersion(); + var jsonWriter = new JsonWriter(textWriter, (format == JsonFormat.JSON_FORMAT_V1)); jsonWriter.startObject(); // top-level object - jsonWriter.startObject("threadDump"); + if (format > JsonFormat.JSON_FORMAT_V1) { + jsonWriter.writeProperty("formatVersion", format); + } - jsonWriter.writeProperty("processId", processId()); + jsonWriter.writeLongProperty("processId", processId()); jsonWriter.writeProperty("time", Instant.now()); jsonWriter.writeProperty("runtimeVersion", Runtime.version()); @@ -284,7 +309,11 @@ public class ThreadDumper { jsonWriter.writeProperty("parent", container.parent()); Thread owner = container.owner(); - jsonWriter.writeProperty("owner", (owner != null) ? 
owner.threadId() : null); + if (owner != null) { + jsonWriter.writeLongProperty("owner", owner.threadId()); + } else { + jsonWriter.writeProperty("owner", null); // owner is not optional + } long threadCount = 0; jsonWriter.startArray("threads"); @@ -301,7 +330,7 @@ public class ThreadDumper { if (!ThreadContainers.trackAllThreads()) { threadCount = Long.max(threadCount, container.threadCount()); } - jsonWriter.writeProperty("threadCount", threadCount); + jsonWriter.writeLongProperty("threadCount", threadCount); jsonWriter.endObject(); @@ -324,7 +353,7 @@ public class ThreadDumper { StackTraceElement[] stackTrace = snapshot.stackTrace(); jsonWriter.startObject(); - jsonWriter.writeProperty("tid", thread.threadId()); + jsonWriter.writeLongProperty("tid", thread.threadId()); jsonWriter.writeProperty("time", now); if (thread.isVirtual()) { jsonWriter.writeProperty("virtual", Boolean.TRUE); @@ -339,7 +368,7 @@ public class ThreadDumper { jsonWriter.startObject("parkBlocker"); jsonWriter.writeProperty("object", Objects.toIdentityString(parkBlocker)); if (snapshot.parkBlockerOwner() instanceof Thread owner) { - jsonWriter.writeProperty("owner", owner.threadId()); + jsonWriter.writeLongProperty("owner", owner.threadId()); } jsonWriter.endObject(); } @@ -380,7 +409,7 @@ public class ThreadDumper { // thread identifier of carrier, when mounted if (thread.isVirtual() && snapshot.carrierThread() instanceof Thread carrier) { - jsonWriter.writeProperty("carrier", carrier.threadId()); + jsonWriter.writeLongProperty("carrier", carrier.threadId()); } jsonWriter.endObject(); @@ -411,10 +440,12 @@ public class ThreadDumper { } } private final Deque stack = new ArrayDeque<>(); + private final boolean generateLongsAsString; private final TextWriter writer; - JsonWriter(TextWriter writer) { + JsonWriter(TextWriter writer, boolean generateLongsAsString) { this.writer = writer; + this.generateLongsAsString = generateLongsAsString; } private void indent() { @@ -461,6 +492,7 @@ public 
class ThreadDumper { */ void writeProperty(String name, Object obj) { Node node = stack.peek(); + assert node != null; if (node.getAndIncrementPropertyCount() > 0) { writer.println(","); } @@ -469,8 +501,6 @@ public class ThreadDumper { writer.print("\"" + name + "\": "); } switch (obj) { - // Long may be larger than safe range of JSON integer value - case Long _ -> writer.print("\"" + obj + "\""); case Number _ -> writer.print(obj); case Boolean _ -> writer.print(obj); case null -> writer.print("null"); @@ -478,6 +508,19 @@ public class ThreadDumper { } } + /** + * Write a property with a long value. If the value is outside the "interop" + * range of IEEE-754 double-precision floating point (64-bit) then it is + * written as a string. + */ + void writeLongProperty(String name, long value) { + if (generateLongsAsString || value < -0x1FFFFFFFFFFFFFL || value > 0x1FFFFFFFFFFFFFL) { + writeProperty(name, Long.toString(value)); + } else { + writeProperty(name, value); + } + } + /** * Write an unnamed property. 
*/ diff --git a/src/java.base/share/classes/jdk/internal/vm/VMSupport.java b/src/java.base/share/classes/jdk/internal/vm/VMSupport.java index 197da0d456c..32c358340af 100644 --- a/src/java.base/share/classes/jdk/internal/vm/VMSupport.java +++ b/src/java.base/share/classes/jdk/internal/vm/VMSupport.java @@ -98,6 +98,11 @@ public class VMSupport { return serializePropertiesToByteArray(onlyStrings(System.getProperties())); } + public static byte[] serializeSecurityPropertiesToByteArray() throws IOException { + Properties p = SharedSecrets.getJavaSecurityPropertiesAccess().getCurrentProperties(); + return serializePropertiesToByteArray(onlyStrings(p)); + } + public static byte[] serializeAgentPropertiesToByteArray() throws IOException { return serializePropertiesToByteArray(onlyStrings(getAgentProperties())); } diff --git a/src/java.base/share/classes/module-info.java b/src/java.base/share/classes/module-info.java index d20f6311bca..665b3a3b98d 100644 --- a/src/java.base/share/classes/module-info.java +++ b/src/java.base/share/classes/module-info.java @@ -135,7 +135,6 @@ module java.base { exports javax.security.auth.x500; exports javax.security.cert; - // additional qualified exports may be inserted at build time // see make/gensrc/GenModuleInfo.gmk @@ -147,11 +146,11 @@ module java.base { java.security.sasl; exports jdk.internal to jdk.incubator.vector; - // Note: all modules in the exported list participate in preview features - // and therefore if they use preview features they do not need to be - // compiled with "--enable-preview". + // Note: all modules in the exported list participate in preview features, + // normal or reflective. They do not need to be compiled with "--enable-preview" + // to use preview features and do not need to suppress "preview" warnings. 
// It is recommended for any modules that do participate that their - // module declaration be annotated with jdk.internal.javac.ParticipatesInPreview + // module declaration be annotated with jdk.internal.javac.ParticipatesInPreview. exports jdk.internal.javac to java.compiler, jdk.compiler; diff --git a/src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java b/src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java index 3a915cf96df..480553e9a62 100644 --- a/src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java +++ b/src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java @@ -1924,9 +1924,15 @@ public class HttpURLConnection extends java.net.HttpURLConnection { } statusLine = responses.getValue(0); - StringTokenizer st = new StringTokenizer(statusLine); - st.nextToken(); - respCode = Integer.parseInt(st.nextToken().trim()); + respCode = parseConnectResponseCode(statusLine); + if (respCode == -1) { + // a respCode of -1, due to a invalid status line, + // will (rightly) result in an IOException being thrown + // later in this code. here we merely log the invalid status line. + if (logger.isLoggable(PlatformLogger.Level.FINE)) { + logger.fine("invalid status line: \"" + statusLine + "\""); + } + } if (respCode == HTTP_PROXY_AUTH) { // Read comments labeled "Failed Negotiate" for details. boolean dontUseNegotiate = false; @@ -2027,6 +2033,37 @@ public class HttpURLConnection extends java.net.HttpURLConnection { responses.reset(); } + // parses the status line, that was returned for a CONNECT request, and returns + // the response code from that line. returns -1 if the response code could not be + // parsed. 
+ private static int parseConnectResponseCode(final String statusLine) { + final int invalidStatusLine = -1; + if (statusLine == null || statusLine.isBlank()) { + return invalidStatusLine; + } + // + // status-line = HTTP-version SP status-code SP [ reason-phrase ] + // SP = space character + // + final StringTokenizer st = new StringTokenizer(statusLine, " "); + if (!st.hasMoreTokens()) { + return invalidStatusLine; + } + st.nextToken(); // the HTTP version part (ex: HTTP/1.1) + if (!st.hasMoreTokens()) { + return invalidStatusLine; + } + final String v = st.nextToken().trim(); // status code + try { + return Integer.parseInt(v); + } catch (NumberFormatException nfe) { + if (logger.isLoggable(PlatformLogger.Level.FINE)) { + logger.fine("invalid response code: " + v); + } + } + return invalidStatusLine; + } + /** * Overridden in https to also include the server certificate */ diff --git a/src/java.base/share/classes/sun/nio/cs/UTF_32Coder.java b/src/java.base/share/classes/sun/nio/cs/UTF_32Coder.java index c6f38ec9bfc..72e59d22e2c 100644 --- a/src/java.base/share/classes/sun/nio/cs/UTF_32Coder.java +++ b/src/java.base/share/classes/sun/nio/cs/UTF_32Coder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -185,5 +185,12 @@ class UTF_32Coder { doneBOM = !doBOM; } + public boolean canEncode(char c) { + return !Character.isSurrogate(c); + } + + public boolean canEncode(CharSequence cs) { + return Unicode.isValidUnicode(cs); + } } } diff --git a/src/java.base/share/classes/sun/nio/cs/UTF_8.java b/src/java.base/share/classes/sun/nio/cs/UTF_8.java index 2928ae6d509..fda8e5eec1f 100644 --- a/src/java.base/share/classes/sun/nio/cs/UTF_8.java +++ b/src/java.base/share/classes/sun/nio/cs/UTF_8.java @@ -424,6 +424,10 @@ public final class UTF_8 extends Unicode { return !Character.isSurrogate(c); } + public boolean canEncode(CharSequence cs) { + return Unicode.isValidUnicode(cs); + } + public boolean isLegalReplacement(byte[] repl) { return ((repl.length == 1 && repl[0] >= 0) || super.isLegalReplacement(repl)); diff --git a/src/java.base/share/classes/sun/nio/cs/Unicode.java b/src/java.base/share/classes/sun/nio/cs/Unicode.java index aac77a13ffb..06a50f125c5 100644 --- a/src/java.base/share/classes/sun/nio/cs/Unicode.java +++ b/src/java.base/share/classes/sun/nio/cs/Unicode.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -95,4 +95,23 @@ abstract class Unicode extends Charset || (cs.name().equals("x-Johab")) || (cs.name().equals("Shift_JIS"))); } + + static boolean isValidUnicode(CharSequence cs) { + int length = cs.length(); + for (int i = 0; i < length;) { + char c = cs.charAt(i++); + if (Character.isHighSurrogate(c)) { + if (i == length) { + return false; + } + char low = cs.charAt(i++); + if (!Character.isLowSurrogate(low)) { + return false; + } + } else if (Character.isLowSurrogate(c)) { + return false; + } + } + return true; + } } diff --git a/src/java.base/share/classes/sun/nio/cs/UnicodeEncoder.java b/src/java.base/share/classes/sun/nio/cs/UnicodeEncoder.java index 7b34fb2d512..6f7413dcbf8 100644 --- a/src/java.base/share/classes/sun/nio/cs/UnicodeEncoder.java +++ b/src/java.base/share/classes/sun/nio/cs/UnicodeEncoder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -108,4 +108,8 @@ public abstract class UnicodeEncoder extends CharsetEncoder { public boolean canEncode(char c) { return ! Character.isSurrogate(c); } + + public boolean canEncode(CharSequence cs) { + return Unicode.isValidUnicode(cs); + } } diff --git a/src/java.base/share/classes/sun/security/ec/XDHPublicKeyImpl.java b/src/java.base/share/classes/sun/security/ec/XDHPublicKeyImpl.java index e6f8961f412..e161880f883 100644 --- a/src/java.base/share/classes/sun/security/ec/XDHPublicKeyImpl.java +++ b/src/java.base/share/classes/sun/security/ec/XDHPublicKeyImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,11 @@ public final class XDHPublicKeyImpl extends X509Key implements XECPublicKey { this.paramSpec = new NamedParameterSpec(params.getName()); this.algid = new AlgorithmId(params.getOid()); - this.u = u.mod(params.getP()); + + // RFC 7748 Section 5 requires the MSB of `u` to be zeroed for X25519 + this.u = (params == XECParameters.X448) ? + u.mod(params.getP()) : + u.clearBit(255).mod(params.getP()); byte[] u_arr = this.u.toByteArray(); reverse(u_arr); @@ -72,6 +76,7 @@ public final class XDHPublicKeyImpl extends X509Key implements XECPublicKey { XECParameters params = XECParameters.get(InvalidKeyException::new, algid); this.paramSpec = new NamedParameterSpec(params.getName()); + // construct the BigInteger representation byte[] u_arr = getKey().toByteArray(); reverse(u_arr); diff --git a/src/java.base/share/classes/sun/security/ec/XECOperations.java b/src/java.base/share/classes/sun/security/ec/XECOperations.java index dd8aa482cc3..0ea39a10d40 100644 --- a/src/java.base/share/classes/sun/security/ec/XECOperations.java +++ b/src/java.base/share/classes/sun/security/ec/XECOperations.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,7 +89,11 @@ public class XECOperations { */ public byte[] encodedPointMultiply(byte[] k, BigInteger u) { pruneK(k); - ImmutableIntegerModuloP elemU = field.getElement(u); + + ImmutableIntegerModuloP elemU = (params == XECParameters.X448) ? 
+ field.getElement(u) : + field.getElement(u.clearBit(255)); + return pointMultiply(k, elemU).asByteArray(params.getBytes()); } diff --git a/src/java.base/share/classes/sun/security/provider/DigestBase.java b/src/java.base/share/classes/sun/security/provider/DigestBase.java index 2aaf0a2fac6..0bb15ef3efe 100644 --- a/src/java.base/share/classes/sun/security/provider/DigestBase.java +++ b/src/java.base/share/classes/sun/security/provider/DigestBase.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -242,4 +242,21 @@ abstract class DigestBase extends MessageDigestSpi implements Cloneable { padding = new byte[136]; padding[0] = (byte)0x80; } + + /** + * Digest block-length bytes in a single operation. + * Subclasses are expected to override this method. It is intended + * for fixed-length short input where input includes padding bytes. 
+ * @param input byte array to be digested + * @param inLen the length of the input + * @param output the output buffer + * @param outOffset the offset into output buffer where digest should be written + * @param outLen the length of the output buffer + * @throws UnsupportedOperationException if a subclass does not override this method + */ + void implDigestFixedLengthPreprocessed ( + byte[] input, int inLen, byte[] output, int outOffset, int outLen) + throws UnsupportedOperationException { + throw new UnsupportedOperationException("should not be here"); + } } diff --git a/src/java.base/share/classes/sun/security/provider/HSS.java b/src/java.base/share/classes/sun/security/provider/HSS.java index c1cb5ed6a30..50afba7cab8 100644 --- a/src/java.base/share/classes/sun/security/provider/HSS.java +++ b/src/java.base/share/classes/sun/security/provider/HSS.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,16 +24,22 @@ */ package sun.security.provider; +import java.io.ByteArrayOutputStream; +import java.io.InvalidObjectException; +import java.io.Serial; +import java.io.Serializable; +import java.security.SecureRandom; +import java.security.*; +import java.security.spec.AlgorithmParameterSpec; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.KeySpec; +import java.security.spec.X509EncodedKeySpec; +import java.util.Arrays; + import sun.security.util.*; import sun.security.x509.AlgorithmId; import sun.security.x509.X509Key; -import java.io.*; -import java.security.*; -import java.security.SecureRandom; -import java.security.spec.*; -import java.util.Arrays; - /** * Implementation of the Hierarchical Signature System using the * Leighton-Micali Signatures (HSS/LMS) as described in RFC 8554 and @@ -196,42 +202,94 @@ public final class HSS extends SignatureSpi { static class LMSUtils { static final int LMS_RESERVED = 0; - static final int LMS_SHA256_M32_H5 = 5; - static final int LMS_SHA256_M32_H10 = 6; - static final int LMS_SHA256_M32_H15 = 7; - static final int LMS_SHA256_M32_H20 = 8; - static final int LMS_SHA256_M32_H25 = 9; + static final int LMS_SHA256_M32_H5 = 0x05; + static final int LMS_SHA256_M32_H10 = 0x06; + static final int LMS_SHA256_M32_H15 = 0x07; + static final int LMS_SHA256_M32_H20 = 0x08; + static final int LMS_SHA256_M32_H25 = 0x09; + static final int LMS_SHA256_M24_H5 = 0x0a; + static final int LMS_SHA256_M24_H10 = 0x0b; + static final int LMS_SHA256_M24_H15 = 0x0c; + static final int LMS_SHA256_M24_H20 = 0x0d; + static final int LMS_SHA256_M24_H25 = 0x0e; + static final int LMS_SHAKE_M32_H5 = 0x0f; + static final int LMS_SHAKE_M32_H10 = 0x10; + static final int LMS_SHAKE_M32_H15 = 0x11; + static final int LMS_SHAKE_M32_H20 = 0x12; + static final int LMS_SHAKE_M32_H25 = 0x13; + static final int LMS_SHAKE_M24_H5 = 0x14; + static final int 
LMS_SHAKE_M24_H10 = 0x15; + static final int LMS_SHAKE_M24_H15 = 0x16; + static final int LMS_SHAKE_M24_H20 = 0x17; + static final int LMS_SHAKE_M24_H25 = 0x18; static String lmsType(int type) { - String typeStr; - switch (type) { - case LMS_RESERVED: typeStr = "LMS_RESERVED"; break; - case LMS_SHA256_M32_H5: typeStr = "LMS_SHA256_M32_H5"; break; - case LMS_SHA256_M32_H10: typeStr = "LMS_SHA256_M32_H10"; break; - case LMS_SHA256_M32_H15: typeStr = "LMS_SHA256_M32_H15"; break; - case LMS_SHA256_M32_H20: typeStr = "LMS_SHA256_M32_H20"; break; - case LMS_SHA256_M32_H25: typeStr = "LMS_SHA256_M32_H25"; break; - default: typeStr = "unrecognized"; - } + String typeStr = switch (type) { + case LMS_RESERVED -> "LMS_RESERVED"; + case LMS_SHA256_M32_H5 -> "LMS_SHA256_M32_H5"; + case LMS_SHA256_M32_H10 -> "LMS_SHA256_M32_H10"; + case LMS_SHA256_M32_H15 -> "LMS_SHA256_M32_H15"; + case LMS_SHA256_M32_H20 -> "LMS_SHA256_M32_H20"; + case LMS_SHA256_M32_H25 -> "LMS_SHA256_M32_H25"; + case LMS_SHA256_M24_H5 -> "LMS_SHA256_M24_H5"; + case LMS_SHA256_M24_H10 -> "LMS_SHA256_M24_H10"; + case LMS_SHA256_M24_H15 -> "LMS_SHA256_M24_H15"; + case LMS_SHA256_M24_H20 -> "LMS_SHA256_M24_H20"; + case LMS_SHA256_M24_H25 -> "LMS_SHA256_M24_H25"; + case LMS_SHAKE_M32_H5 -> "LMS_SHAKE_M32_H5"; + case LMS_SHAKE_M32_H10 -> "LMS_SHAKE_M32_H10"; + case LMS_SHAKE_M32_H15 -> "LMS_SHAKE_M32_H15"; + case LMS_SHAKE_M32_H20 -> "LMS_SHAKE_M32_H20"; + case LMS_SHAKE_M32_H25 -> "LMS_SHAKE_M32_H25"; + case LMS_SHAKE_M24_H5 -> "LMS_SHAKE_M24_H5"; + case LMS_SHAKE_M24_H10 -> "LMS_SHAKE_M24_H10"; + case LMS_SHAKE_M24_H15 -> "LMS_SHAKE_M24_H15"; + case LMS_SHAKE_M24_H20 -> "LMS_SHAKE_M24_H20"; + case LMS_SHAKE_M24_H25 -> "LMS_SHAKE_M24_H25"; + default -> "unrecognized"; + }; return typeStr; } static final int LMOTS_RESERVED = 0; - static final int LMOTS_SHA256_N32_W1 = 1; - static final int LMOTS_SHA256_N32_W2 = 2; - static final int LMOTS_SHA256_N32_W4 = 3; - static final int LMOTS_SHA256_N32_W8 = 4; + static final 
int LMOTS_SHA256_N32_W1 = 0x01; + static final int LMOTS_SHA256_N32_W2 = 0x02; + static final int LMOTS_SHA256_N32_W4 = 0x03; + static final int LMOTS_SHA256_N32_W8 = 0x04; + static final int LMOTS_SHA256_N24_W1 = 0x05; + static final int LMOTS_SHA256_N24_W2 = 0x06; + static final int LMOTS_SHA256_N24_W4 = 0x07; + static final int LMOTS_SHA256_N24_W8 = 0x08; + static final int LMOTS_SHAKE_N32_W1 = 0x09; + static final int LMOTS_SHAKE_N32_W2 = 0x0a; + static final int LMOTS_SHAKE_N32_W4 = 0x0b; + static final int LMOTS_SHAKE_N32_W8 = 0x0c; + static final int LMOTS_SHAKE_N24_W1 = 0x0d; + static final int LMOTS_SHAKE_N24_W2 = 0x0e; + static final int LMOTS_SHAKE_N24_W4 = 0x0f; + static final int LMOTS_SHAKE_N24_W8 = 0x10; static String lmotsType(int type) { - String typeStr; - switch (type) { - case LMOTS_RESERVED: typeStr = "LMOTS_RESERVED"; break; - case LMOTS_SHA256_N32_W1: typeStr = "LMOTS_SHA256_N32_W1"; break; - case LMOTS_SHA256_N32_W2: typeStr = "LMOTS_SHA256_N32_W2"; break; - case LMOTS_SHA256_N32_W4: typeStr = "LMOTS_SHA256_N32_W4"; break; - case LMOTS_SHA256_N32_W8: typeStr = "LMOTS_SHA256_N32_W8"; break; - default: typeStr = "unrecognized"; - } + String typeStr = switch (type) { + case LMOTS_RESERVED -> "LMOTS_RESERVED"; + case LMOTS_SHA256_N32_W1 -> "LMOTS_SHA256_N32_W1"; + case LMOTS_SHA256_N32_W2 -> "LMOTS_SHA256_N32_W2"; + case LMOTS_SHA256_N32_W4 -> "LMOTS_SHA256_N32_W4"; + case LMOTS_SHA256_N32_W8 -> "LMOTS_SHA256_N32_W8"; + case LMOTS_SHA256_N24_W1 -> "LMOTS_SHA256_N24_W1"; + case LMOTS_SHA256_N24_W2 -> "LMOTS_SHA256_N24_W2"; + case LMOTS_SHA256_N24_W4 -> "LMOTS_SHA256_N24_W4"; + case LMOTS_SHA256_N24_W8 -> "LMOTS_SHA256_N24_W8"; + case LMOTS_SHAKE_N32_W1 -> "LMOTS_SHAKE_N32_W1"; + case LMOTS_SHAKE_N32_W2 -> "LMOTS_SHAKE_N32_W2"; + case LMOTS_SHAKE_N32_W4 -> "LMOTS_SHAKE_N32_W4"; + case LMOTS_SHAKE_N32_W8 -> "LMOTS_SHAKE_N32_W8"; + case LMOTS_SHAKE_N24_W1 -> "LMOTS_SHAKE_N24_W1"; + case LMOTS_SHAKE_N24_W2 -> "LMOTS_SHAKE_N24_W2"; + case 
LMOTS_SHAKE_N24_W4 -> "LMOTS_SHAKE_N24_W4"; + case LMOTS_SHAKE_N24_W8 -> "LMOTS_SHAKE_N24_W8"; + default -> "unrecognized"; + }; return typeStr; } @@ -352,53 +410,65 @@ public final class HSS extends SignatureSpi { static class LMSParams { final int m; // the number of bytes used from the hash output - final int hashAlg_m = 32; // output length of the LMS tree hash function + final int hashAlg_m; // output length of the LMS tree hash function final int h; // height of the LMS tree final int twoPowh; final String hashAlgStr; - LMSParams(int m, int h, String hashAlgStr) { + private LMSParams(int m, int h, String hashAlgStr, int hashAlg_m) { this.m = m; this.h = h; this.hashAlgStr = hashAlgStr; + this.hashAlg_m = hashAlg_m; twoPowh = 1 << h; } static LMSParams of(int type) { - int m; - int h; - String hashAlgStr; - switch (type) { - case LMSUtils.LMS_SHA256_M32_H5: - m = 32; - h = 5; - hashAlgStr = "SHA-256"; - break; - case LMSUtils.LMS_SHA256_M32_H10: - m = 32; - h = 10; - hashAlgStr = "SHA-256"; - break; - case LMSUtils.LMS_SHA256_M32_H15: - m = 32; - h = 15; - hashAlgStr = "SHA-256"; - break; - case LMSUtils.LMS_SHA256_M32_H20: - m = 32; - h = 20; - hashAlgStr = "SHA-256"; - break; - case LMSUtils.LMS_SHA256_M32_H25: - m = 32; - h = 25; - hashAlgStr = "SHA-256"; - break; - default: + LMSParams params = switch (type) { + case LMSUtils.LMS_SHA256_M32_H5 -> + new LMSParams(32, 5, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M32_H10 -> + new LMSParams(32, 10, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M32_H15 -> + new LMSParams(32, 15, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M32_H20 -> + new LMSParams(32, 20, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M32_H25 -> + new LMSParams(32, 25, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M24_H5 -> + new LMSParams(24, 5, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M24_H10 -> + new LMSParams(24, 10, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M24_H15 -> + new LMSParams(24, 15, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M24_H20 -> 
+ new LMSParams(24, 20, "SHA-256", 32); + case LMSUtils.LMS_SHA256_M24_H25 -> + new LMSParams(24, 25, "SHA-256", 32); + case LMSUtils.LMS_SHAKE_M32_H5 -> + new LMSParams(32, 5, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M32_H10 -> + new LMSParams(32, 10, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M32_H15 -> + new LMSParams(32, 15, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M32_H20 -> + new LMSParams(32, 20, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M32_H25 -> + new LMSParams(32, 25, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M24_H5 -> + new LMSParams(24, 5, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M24_H10 -> + new LMSParams(24, 10, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M24_H15 -> + new LMSParams(24, 15, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M24_H20 -> + new LMSParams(24, 20, "SHAKE256-512", 64); + case LMSUtils.LMS_SHAKE_M24_H25 -> + new LMSParams(24, 25, "SHAKE256-512", 64); + default -> throw new IllegalArgumentException("Unsupported or bad LMS type"); - } - - return new LMSParams(m, h, hashAlgStr); + }; + return params; } boolean hasSameHash(LMSParams other) { @@ -495,7 +565,7 @@ public final class HSS extends SignatureSpi { static class LMOTSParams { final int lmotSigType; final int n; // the number of bytes used from the hash output - final int hashAlg_n = 32; // the output length of the hash function + int hashAlg_n; // the output length of the hash function final int w; final int twoPowWMinus1; final int ls; @@ -511,6 +581,7 @@ public final class HSS extends SignatureSpi { // back into the buffer. This way, we avoid memory allocations and some // computations that would have to be done otherwise. final byte[] hashBuf; + // Precomputed block for SHA256 when the message size is 55 bytes // (i.e. 
when SHA256 is used) private static final byte[] hashbufSha256_32 = { @@ -523,10 +594,64 @@ public final class HSS extends SignatureSpi { 0, 0, 0, 0, 0, 0, 0, (byte) 0x80, 0, 0, 0, 0, 0, 0, 1, (byte) 0xb8 }; + // Precomputed block for SHA256 when the message size is 47 bytes + // (i.e. when SHA256-192 is used) + private static final byte[] hashbufSha256_24 = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, (byte) 0x80, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 0x78 + }; + // Precomputed block for SHAKE256 when the message size is 55 bytes + // (i.e. when SHAKE256 is used) + private static final byte[] hashbufShake256_32 = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, (byte) 0x1F, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, (byte) 0x80 + }; + // Precomputed block for SHAKE256 when the message size is 47 bytes + // (i.e. 
when SHAKE256-192 is used) + private static final byte[] hashbufShake256_24 = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, (byte) 0x1F, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, (byte) 0x80 + }; private LMOTSParams( int lmotSigType, int hLen, int w, - int ls, int p, String hashAlgName) { + int ls, int p, String hashAlgName, int hashAlg_n) { this.lmotSigType = lmotSigType; this.n = hLen; this.w = w; @@ -534,32 +659,60 @@ public final class HSS extends SignatureSpi { this.p = p; twoPowWMinus1 = (1 << w) - 1; this.hashAlgName = hashAlgName; - hashBuf = hashbufSha256_32; + this.hashAlg_n = hashAlg_n; + hashBuf = switch (hashAlgName) { + case "SHAKE256-512" -> { + yield this.n == 24 ? + hashbufShake256_24 : hashbufShake256_32; + } + case "SHA-256" -> { + yield this.n == 24 ? 
+ hashbufSha256_24 : hashbufSha256_32; + } + default -> + throw new IllegalArgumentException( + "Unknown hash algorithm "+hashAlgName); + }; } static LMOTSParams of(int lmotsType) { - LMOTSParams params; - switch (lmotsType) { - case LMSUtils.LMOTS_SHA256_N32_W1: - params = new LMOTSParams( - lmotsType, 32, 1, 7, 265, "SHA-256"); - break; - case LMSUtils.LMOTS_SHA256_N32_W2: - params = new LMOTSParams( - lmotsType, 32, 2, 6, 133, "SHA-256"); - break; - case LMSUtils.LMOTS_SHA256_N32_W4: - params = new LMOTSParams( - lmotsType, 32, 4, 4, 67, "SHA-256"); - break; - case LMSUtils.LMOTS_SHA256_N32_W8: - params = new LMOTSParams( - lmotsType, 32, 8, 0, 34, "SHA-256"); - break; - default: + LMOTSParams params = switch (lmotsType) { + case LMSUtils.LMOTS_SHA256_N32_W1 -> + new LMOTSParams(lmotsType, 32, 1, 7, 265, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N32_W2 -> + new LMOTSParams(lmotsType, 32, 2, 6, 133, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N32_W4 -> + new LMOTSParams(lmotsType, 32, 4, 4, 67, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N32_W8 -> + new LMOTSParams(lmotsType, 32, 8, 0, 34, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N24_W1 -> + new LMOTSParams(lmotsType, 24, 1, 8, 200, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N24_W2 -> + new LMOTSParams(lmotsType, 24, 2, 6, 101, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N24_W4 -> + new LMOTSParams(lmotsType, 24, 4, 4, 51, "SHA-256", 32); + case LMSUtils.LMOTS_SHA256_N24_W8 -> + new LMOTSParams(lmotsType, 24, 8, 0, 26, "SHA-256", 32); + case LMSUtils.LMOTS_SHAKE_N32_W1 -> + new LMOTSParams(lmotsType, 32, 1, 7, 265, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N32_W2 -> + new LMOTSParams(lmotsType, 32, 2, 6, 133, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N32_W4 -> + new LMOTSParams(lmotsType, 32, 4, 4, 67, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N32_W8 -> + new LMOTSParams(lmotsType, 32, 8, 0, 34, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N24_W1 -> + new 
LMOTSParams(lmotsType, 24, 1, 8, 200, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N24_W2 -> + new LMOTSParams(lmotsType, 24, 2, 6, 101, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N24_W4 -> + new LMOTSParams(lmotsType, 24, 4, 4, 51, "SHAKE256-512", 64); + case LMSUtils.LMOTS_SHAKE_N24_W8 -> + new LMOTSParams(lmotsType, 24, 8, 0, 26, "SHAKE256-512", 64); + default -> throw new IllegalArgumentException( "Unsupported or bad OTS Algorithm Identifier."); - } + }; return params; } @@ -580,13 +733,6 @@ public final class HSS extends SignatureSpi { S[len + 1] = (byte) (sum & 0xff); } - void digestFixedLengthPreprocessed( - SHA2.SHA256 sha256, byte[] input, int inLen, - byte[] output, int outOffset, int outLen) { - sha256.implDigestFixedLengthPreprocessed( - input, inLen, output, outOffset, outLen); - } - byte[] lmotsPubKeyCandidate( LMSignature lmSig, byte[] message, LMSPublicKey pKey) throws SignatureException { @@ -625,7 +771,13 @@ public final class HSS extends SignatureSpi { byte[] preZi = hashBuf.clone(); int hashLen = hashBuf.length; - SHA2.SHA256 sha256 = new SHA2.SHA256(); + + DigestBase db; + if (hashAlgName.startsWith("SHAKE")) { + db = new SHA3.SHAKE256Hash(); + } else { + db = new SHA2.SHA256(); + } pKey.getI(preZi, 0); lmSig.getQArr(preZi, 16); @@ -643,11 +795,11 @@ public final class HSS extends SignatureSpi { for (int j = a; j < twoPowWMinus1; j++) { preZi[22] = (byte) j; if (j < twoPowWMinus2) { - digestFixedLengthPreprocessed( - sha256, preZi, hashLen, preZi, 23, n); + db.implDigestFixedLengthPreprocessed(preZi, + hashLen, preZi, 23, n); } else { - digestFixedLengthPreprocessed( - sha256, preZi, hashLen, preCandidate, 22 + i * n, n); + db.implDigestFixedLengthPreprocessed(preZi, + hashLen, preCandidate, 22 + i * n, n); } } } diff --git a/src/java.base/share/classes/sun/security/provider/SHA2.java b/src/java.base/share/classes/sun/security/provider/SHA2.java index e966e6b77f8..7d8c2840de9 100644 --- 
a/src/java.base/share/classes/sun/security/provider/SHA2.java +++ b/src/java.base/share/classes/sun/security/provider/SHA2.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -117,6 +117,7 @@ abstract class SHA2 extends DigestBase { } + @Override protected void implDigestFixedLengthPreprocessed( byte[] input, int inLen, byte[] output, int outOffset, int outLen) { implReset(); diff --git a/src/java.base/share/classes/sun/security/provider/SHA3.java b/src/java.base/share/classes/sun/security/provider/SHA3.java index a096cac5f50..0578645c1cd 100644 --- a/src/java.base/share/classes/sun/security/provider/SHA3.java +++ b/src/java.base/share/classes/sun/security/provider/SHA3.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,6 +98,15 @@ public abstract class SHA3 extends DigestBase { this.suffix = suffix; } + @Override + protected void implDigestFixedLengthPreprocessed( + byte[] input, int inLen, byte[] output, int outOffset, int outLen) { + implReset(); + + implCompress(input, 0); + implDigest0(output, outOffset, outLen); + } + private void implCompressCheck(byte[] b, int ofs) { Objects.requireNonNull(b); Preconditions.checkIndex(ofs + blockSize - 1, b.length, Preconditions.AIOOBE_FORMATTER); @@ -136,9 +145,6 @@ public abstract class SHA3 extends DigestBase { * DigestBase calls implReset() when necessary. 
*/ void implDigest(byte[] out, int ofs) { - // Moving this allocation to the block where it is used causes a little - // performance drop, that is why it is here. - byte[] byteState = new byte[8]; if (engineGetDigestLength() == 0) { // This is an XOF, so the digest() call is illegal. throw new ProviderException("Calling digest() is not allowed in an XOF"); @@ -146,8 +152,12 @@ public abstract class SHA3 extends DigestBase { finishAbsorb(); + implDigest0(out, ofs, engineGetDigestLength()); + } + + void implDigest0(byte[] out, int ofs, int outLen) { int availableBytes = blockSize; - int numBytes = engineGetDigestLength(); + int numBytes = outLen; while (numBytes > availableBytes) { for (int i = 0; i < availableBytes / 8; i++) { @@ -163,6 +173,10 @@ public abstract class SHA3 extends DigestBase { asLittleEndian.set(out, ofs, state[i]); ofs += 8; } + + // Moving this allocation to the block where it is used causes a little + // performance drop, that is why it is here. + byte[] byteState = new byte[8]; if (numBytes % 8 != 0) { asLittleEndian.set(byteState, 0, state[numLongs]); System.arraycopy(byteState, 0, out, ofs, numBytes % 8); diff --git a/src/java.base/share/classes/sun/security/provider/X509Factory.java b/src/java.base/share/classes/sun/security/provider/X509Factory.java index f732c7c0455..4be83d629bb 100644 --- a/src/java.base/share/classes/sun/security/provider/X509Factory.java +++ b/src/java.base/share/classes/sun/security/provider/X509Factory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,9 +112,10 @@ public class X509Factory extends CertificateFactorySpi { if (cert != null) { return cert; } - cert = new X509CertImpl(encoding); - addToCache(certCache, cert.getEncodedInternal(), cert); - return cert; + // Build outside lock + X509CertImpl newCert = new X509CertImpl(encoding); + byte[] enc = newCert.getEncodedInternal(); + return addIfNotPresent(certCache, enc, newCert); } /** @@ -156,7 +157,7 @@ public class X509Factory extends CertificateFactorySpi { * @throws CertificateException if failures occur while obtaining the DER * encoding for certificate data. */ - public static synchronized X509CertImpl intern(X509Certificate c) + public static X509CertImpl intern(X509Certificate c) throws CertificateException { if (c == null) { return null; @@ -168,18 +169,23 @@ public class X509Factory extends CertificateFactorySpi { } else { encoding = c.getEncoded(); } - X509CertImpl newC = getFromCache(certCache, encoding); - if (newC != null) { - return newC; + // First check under per-cache lock + X509CertImpl cached = getFromCache(certCache, encoding); + if (cached != null) { + return cached; } + + // Build outside lock + X509CertImpl newC; + byte[] enc; if (isImpl) { - newC = (X509CertImpl)c; + newC = (X509CertImpl) c; + enc = encoding; } else { newC = new X509CertImpl(encoding); - encoding = newC.getEncodedInternal(); + enc = newC.getEncodedInternal(); } - addToCache(certCache, encoding, newC); - return newC; + return addIfNotPresent(certCache, enc, newC); } /** @@ -192,7 +198,7 @@ public class X509Factory extends CertificateFactorySpi { * @throws CRLException if failures occur while obtaining the DER * encoding for CRL data. 
*/ - public static synchronized X509CRLImpl intern(X509CRL c) + public static X509CRLImpl intern(X509CRL c) throws CRLException { if (c == null) { return null; @@ -204,39 +210,47 @@ public class X509Factory extends CertificateFactorySpi { } else { encoding = c.getEncoded(); } - X509CRLImpl newC = getFromCache(crlCache, encoding); - if (newC != null) { - return newC; + X509CRLImpl cached = getFromCache(crlCache, encoding); + if (cached != null) { + return cached; } + + X509CRLImpl newC; + byte[] enc; if (isImpl) { - newC = (X509CRLImpl)c; + newC = (X509CRLImpl) c; + enc = encoding; } else { newC = new X509CRLImpl(encoding); - encoding = newC.getEncodedInternal(); + enc = newC.getEncodedInternal(); } - addToCache(crlCache, encoding, newC); - return newC; + return addIfNotPresent(crlCache, enc, newC); } /** * Get the X509CertImpl or X509CRLImpl from the cache. */ - private static synchronized V getFromCache(Cache cache, - byte[] encoding) { - Object key = new Cache.EqualByteArray(encoding); - return cache.get(key); + private static V getFromCache(Cache cache, byte[] encoding) { + return cache.get(new Cache.EqualByteArray(encoding)); } /** * Add the X509CertImpl or X509CRLImpl to the cache. */ - private static synchronized void addToCache(Cache cache, - byte[] encoding, V value) { + private static V addIfNotPresent(Cache cache, byte[] encoding, V value) { if (encoding.length > ENC_MAX_LENGTH) { - return; + return value; } Object key = new Cache.EqualByteArray(encoding); - cache.put(key, value); + // Synchronize only to make the "check + insert" decision atomic. 
+ synchronized (cache) { + V existing = cache.get(key); + if (existing != null) { + return existing; + } + cache.put(key, value); + return value; + } } /** @@ -389,13 +403,14 @@ public class X509Factory extends CertificateFactorySpi { try { byte[] encoding = readOneBlock(is); if (encoding != null) { - X509CRLImpl crl = getFromCache(crlCache, encoding); - if (crl != null) { - return crl; + X509CRLImpl cached = getFromCache(crlCache, encoding); + if (cached != null) { + return cached; } - crl = new X509CRLImpl(encoding); - addToCache(crlCache, crl.getEncodedInternal(), crl); - return crl; + // Build outside lock + X509CRLImpl crl = new X509CRLImpl(encoding); + byte[] enc = crl.getEncodedInternal(); + return addIfNotPresent(crlCache, enc, crl); } else { throw new IOException("Empty input"); } diff --git a/src/java.base/share/classes/sun/security/ssl/Alert.java b/src/java.base/share/classes/sun/security/ssl/Alert.java index fb06b02a5d4..e9588a09b3d 100644 --- a/src/java.base/share/classes/sun/security/ssl/Alert.java +++ b/src/java.base/share/classes/sun/security/ssl/Alert.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -281,6 +281,8 @@ public enum Alert { // consumer so the state machine doesn't expect it. 
tc.handshakeContext.handshakeConsumers.remove( SSLHandshake.CERTIFICATE.id); + tc.handshakeContext.handshakeConsumers.remove( + SSLHandshake.COMPRESSED_CERTIFICATE.id); tc.handshakeContext.handshakeConsumers.remove( SSLHandshake.CERTIFICATE_VERIFY.id); } diff --git a/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java b/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java index c6897d71aa6..af5007d7899 100644 --- a/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java +++ b/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java @@ -781,14 +781,6 @@ final class CertificateMessage { } } - T13CertificateMessage(HandshakeContext handshakeContext, - byte[] requestContext, List certificates) { - super(handshakeContext); - - this.requestContext = requestContext.clone(); - this.certEntries = certificates; - } - T13CertificateMessage(HandshakeContext handshakeContext, ByteBuffer m) throws IOException { super(handshakeContext); @@ -925,16 +917,26 @@ final class CertificateMessage { HandshakeMessage message) throws IOException { // The producing happens in handshake context only. HandshakeContext hc = (HandshakeContext)context; - if (hc.sslConfig.isClientMode) { - return onProduceCertificate( - (ClientHandshakeContext)context, message); - } else { - return onProduceCertificate( + T13CertificateMessage cm = hc.sslConfig.isClientMode ? + onProduceCertificate( + (ClientHandshakeContext)context, message) : + onProduceCertificate( (ServerHandshakeContext)context, message); + + // Output the handshake message. + if (hc.certDeflater == null) { + cm.write(hc.handshakeOutput); + hc.handshakeOutput.flush(); + } else { + // Replace with CompressedCertificate message + CompressedCertificate.handshakeProducer.produce(hc, cm); } + + // The handshake message has been delivered. 
+ return null; } - private byte[] onProduceCertificate(ServerHandshakeContext shc, + private T13CertificateMessage onProduceCertificate(ServerHandshakeContext shc, HandshakeMessage message) throws IOException { ClientHelloMessage clientHello = (ClientHelloMessage)message; @@ -993,12 +995,7 @@ final class CertificateMessage { SSLLogger.fine("Produced server Certificate message", cm); } - // Output the handshake message. - cm.write(shc.handshakeOutput); - shc.handshakeOutput.flush(); - - // The handshake message has been delivered. - return null; + return cm; } private static SSLPossession choosePossession( @@ -1045,7 +1042,7 @@ final class CertificateMessage { return pos; } - private byte[] onProduceCertificate(ClientHandshakeContext chc, + private T13CertificateMessage onProduceCertificate(ClientHandshakeContext chc, HandshakeMessage message) throws IOException { ClientHelloMessage clientHello = (ClientHelloMessage)message; SSLPossession pos = choosePossession(chc, clientHello); @@ -1091,12 +1088,7 @@ final class CertificateMessage { SSLLogger.fine("Produced client Certificate message", cm); } - // Output the handshake message. - cm.write(chc.handshakeOutput); - chc.handshakeOutput.flush(); - - // The handshake message has been delivered. 
- return null; + return cm; } } @@ -1116,6 +1108,7 @@ final class CertificateMessage { HandshakeContext hc = (HandshakeContext)context; // clean up this consumer + hc.handshakeConsumers.remove(SSLHandshake.COMPRESSED_CERTIFICATE.id); hc.handshakeConsumers.remove(SSLHandshake.CERTIFICATE.id); // Ensure that the Certificate message has not been sent w/o diff --git a/src/java.base/share/classes/sun/security/ssl/CertificateRequest.java b/src/java.base/share/classes/sun/security/ssl/CertificateRequest.java index 039399560cd..2eceb4d9ebd 100644 --- a/src/java.base/share/classes/sun/security/ssl/CertificateRequest.java +++ b/src/java.base/share/classes/sun/security/ssl/CertificateRequest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -956,6 +956,11 @@ final class CertificateRequest { // update // shc.certRequestContext = crm.requestContext.clone(); + if (shc.certInflaters != null && !shc.certInflaters.isEmpty()) { + shc.handshakeConsumers.put( + SSLHandshake.COMPRESSED_CERTIFICATE.id, + SSLHandshake.COMPRESSED_CERTIFICATE); + } shc.handshakeConsumers.put(SSLHandshake.CERTIFICATE.id, SSLHandshake.CERTIFICATE); shc.handshakeConsumers.put(SSLHandshake.CERTIFICATE_VERIFY.id, diff --git a/src/java.base/share/classes/sun/security/ssl/CompressCertExtension.java b/src/java.base/share/classes/sun/security/ssl/CompressCertExtension.java new file mode 100644 index 00000000000..eff97857ef0 --- /dev/null +++ b/src/java.base/share/classes/sun/security/ssl/CompressCertExtension.java @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.security.ssl; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.text.MessageFormat; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import javax.net.ssl.SSLProtocolException; +import sun.security.ssl.SSLExtension.ExtensionConsumer; +import sun.security.ssl.SSLExtension.SSLExtensionSpec; +import sun.security.ssl.SSLHandshake.HandshakeMessage; + +/** + * Pack of the "compress_certificate" extensions [RFC 5246]. 
+ */ +final class CompressCertExtension { + + static final HandshakeProducer chNetworkProducer = + new CHCompressCertificateProducer(); + static final ExtensionConsumer chOnLoadConsumer = + new CHCompressCertificateConsumer(); + + static final HandshakeProducer crNetworkProducer = + new CRCompressCertificateProducer(); + static final ExtensionConsumer crOnLoadConsumer = + new CRCompressCertificateConsumer(); + + static final SSLStringizer ccStringizer = + new CompressCertificateStringizer(); + + /** + * The "signature_algorithms" extension. + */ + static final class CertCompressionSpec implements SSLExtensionSpec { + + private final int[] compressionAlgorithms; // non-null + + CertCompressionSpec( + Map> certInflaters) { + compressionAlgorithms = new int[certInflaters.size()]; + int i = 0; + for (Integer id : certInflaters.keySet()) { + compressionAlgorithms[i++] = id; + } + } + + CertCompressionSpec(HandshakeContext hc, + ByteBuffer buffer) throws IOException { + if (buffer.remaining() < 2) { // 2: the length of the list + throw hc.conContext.fatal(Alert.DECODE_ERROR, + new SSLProtocolException( + "Invalid compress_certificate: insufficient data")); + } + + byte[] algs = Record.getBytes8(buffer); + if (buffer.hasRemaining()) { + throw hc.conContext.fatal(Alert.DECODE_ERROR, + new SSLProtocolException( + "Invalid compress_certificate: unknown extra data")); + } + + if (algs.length == 0 || (algs.length & 0x01) != 0) { + throw hc.conContext.fatal(Alert.DECODE_ERROR, + new SSLProtocolException( + "Invalid compress_certificate: incomplete data")); + } + + int[] compressionAlgs = new int[algs.length / 2]; + for (int i = 0, j = 0; i < algs.length; ) { + byte hash = algs[i++]; + byte sign = algs[i++]; + compressionAlgs[j++] = ((hash & 0xFF) << 8) | (sign & 0xFF); + } + + this.compressionAlgorithms = compressionAlgs; + } + + @Override + public String toString() { + MessageFormat messageFormat = new MessageFormat( + "\"compression algorithms\": '['{0}']'", Locale.ENGLISH); 
+ + if (compressionAlgorithms.length == 0) { + Object[] messageFields = { + "" + }; + return messageFormat.format(messageFields); + } else { + StringBuilder builder = new StringBuilder(512); + boolean isFirst = true; + for (int ca : compressionAlgorithms) { + if (isFirst) { + isFirst = false; + } else { + builder.append(", "); + } + + builder.append(CompressionAlgorithm.nameOf(ca)); + } + + Object[] messageFields = { + builder.toString() + }; + + return messageFormat.format(messageFields); + } + } + } + + private static final + class CompressCertificateStringizer implements SSLStringizer { + + @Override + public String toString(HandshakeContext hc, ByteBuffer buffer) { + try { + return (new CertCompressionSpec(hc, buffer)).toString(); + } catch (IOException ioe) { + // For debug logging only, so please swallow exceptions. + return ioe.getMessage(); + } + } + } + + /** + * Network data producer of a "compress_certificate" extension in + * the ClientHello handshake message. + */ + private static final + class CHCompressCertificateProducer implements HandshakeProducer { + + // Prevent instantiation of this class. + private CHCompressCertificateProducer() { + // blank + } + + @Override + public byte[] produce(ConnectionContext context, + HandshakeMessage message) throws IOException { + // The producing happens in client side only. + return produceCompCertExt(context, + SSLExtension.CH_COMPRESS_CERTIFICATE); + } + } + + /** + * Network data consumer of a "compress_certificate" extension in + * the ClientHello handshake message. + */ + private static final + class CHCompressCertificateConsumer implements ExtensionConsumer { + + // Prevent instantiation of this class. + private CHCompressCertificateConsumer() { + // blank + } + + @Override + public void consume(ConnectionContext context, + HandshakeMessage message, ByteBuffer buffer) + throws IOException { + // The consuming happens in server side only. 
+ consumeCompCertExt(context, buffer, + SSLExtension.CH_COMPRESS_CERTIFICATE); + } + } + + /** + * Network data producer of a "compress_certificate" extension in + * the CertificateRequest handshake message. + */ + private static final + class CRCompressCertificateProducer implements HandshakeProducer { + + // Prevent instantiation of this class. + private CRCompressCertificateProducer() { + // blank + } + + @Override + public byte[] produce(ConnectionContext context, + HandshakeMessage message) throws IOException { + // The producing happens in server side only. + return produceCompCertExt(context, + SSLExtension.CR_COMPRESS_CERTIFICATE); + } + } + + /** + * Network data consumer of a "compress_certificate" extension in + * the CertificateRequest handshake message. + */ + private static final + class CRCompressCertificateConsumer implements ExtensionConsumer { + + // Prevent instantiation of this class. + private CRCompressCertificateConsumer() { + // blank + } + + @Override + public void consume(ConnectionContext context, + HandshakeMessage message, ByteBuffer buffer) + throws IOException { + // The consuming happens in client side only. + consumeCompCertExt(context, buffer, + SSLExtension.CR_COMPRESS_CERTIFICATE); + } + } + + private static byte[] produceCompCertExt( + ConnectionContext context, SSLExtension extension) + throws IOException { + + HandshakeContext hc = (HandshakeContext) context; + // Is it a supported and enabled extension? + if (!hc.sslConfig.isAvailable(extension)) { + if (SSLLogger.isOn() && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.fine("Ignore unavailable " + + "compress_certificate extension"); + } + return null; + } + + // Produce the extension. 
+ hc.certInflaters = CompressionAlgorithm.getInflaters(); + + if (hc.certInflaters.isEmpty()) { + if (SSLLogger.isOn() && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.warning("Unable to produce the extension: " + + "no certificate compression inflaters defined"); + } + return null; + } + + int vectorLen = CompressionAlgorithm.sizeInRecord() * + hc.certInflaters.size(); + byte[] extData = new byte[vectorLen + 1]; + ByteBuffer m = ByteBuffer.wrap(extData); + Record.putInt8(m, vectorLen); + for (Integer algId : hc.certInflaters.keySet()) { + Record.putInt16(m, algId); + } + + // Update the context. + hc.handshakeExtensions.put( + extension, new CertCompressionSpec(hc.certInflaters)); + + return extData; + } + + private static void consumeCompCertExt(ConnectionContext context, + ByteBuffer buffer, SSLExtension extension) throws IOException { + + HandshakeContext hc = (HandshakeContext) context; + // Is it a supported and enabled extension? + if (!hc.sslConfig.isAvailable(extension)) { + if (SSLLogger.isOn() && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.fine("Ignore unavailable " + + "compress_certificate extension"); + } + return; // ignore the extension + } + + // Parse the extension. + CertCompressionSpec spec = new CertCompressionSpec(hc, buffer); + + // Update the context. + hc.certDeflater = CompressionAlgorithm.selectDeflater( + spec.compressionAlgorithms); + + if (hc.certDeflater == null) { + if (SSLLogger.isOn() && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.fine("Ignore, no supported " + + "certificate compression algorithms"); + } + } + // No impact on session resumption. 
+ } +} diff --git a/src/java.base/share/classes/sun/security/ssl/CompressedCertificate.java b/src/java.base/share/classes/sun/security/ssl/CompressedCertificate.java new file mode 100644 index 00000000000..067a0344c9d --- /dev/null +++ b/src/java.base/share/classes/sun/security/ssl/CompressedCertificate.java @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package sun.security.ssl; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.text.MessageFormat; +import java.util.Locale; +import java.util.function.Function; +import javax.net.ssl.SSLProtocolException; +import sun.security.ssl.SSLHandshake.HandshakeMessage; +import sun.security.util.Cache; +import sun.security.util.Cache.EqualByteArray; +import sun.security.util.HexDumpEncoder; + +/** + * Pack of the CompressedCertificate handshake message. + */ +final class CompressedCertificate { + + static final SSLConsumer handshakeConsumer = + new CompressedCertConsumer(); + static final HandshakeProducer handshakeProducer = + new CompressedCertProducer(); + + record CompCertCacheKey(EqualByteArray eba, int algId) {} + + /** + * The CompressedCertificate handshake message for TLS 1.3. + */ + static final class CompressedCertMessage extends HandshakeMessage { + + private final int algorithmId; + private final int uncompressedLength; + private final byte[] compressedCert; + + CompressedCertMessage(HandshakeContext context, + int algorithmId, int uncompressedLength, + byte[] compressedCert) { + super(context); + + this.algorithmId = algorithmId; + this.uncompressedLength = uncompressedLength; + this.compressedCert = compressedCert; + } + + CompressedCertMessage(HandshakeContext handshakeContext, + ByteBuffer m) throws IOException { + super(handshakeContext); + + // struct { + // CertificateCompressionAlgorithm algorithm; + // uint24 uncompressed_length; + // opaque compressed_certificate_message<1..2^24-1>; + // } CompressedCertificate; + if (m.remaining() < 9) { + throw new SSLProtocolException( + "Invalid CompressedCertificate message: " + + "insufficient data (length=" + m.remaining() + + ")"); + } + this.algorithmId = Record.getInt16(m); + this.uncompressedLength = Record.getInt24(m); + this.compressedCert = Record.getBytes24(m); + + if (m.hasRemaining()) { + throw handshakeContext.conContext.fatal( + Alert.HANDSHAKE_FAILURE, + "Invalid 
CompressedCertificate message: " + + "unknown extra data"); + } + } + + @Override + public SSLHandshake handshakeType() { + return SSLHandshake.COMPRESSED_CERTIFICATE; + } + + @Override + public int messageLength() { + return 8 + compressedCert.length; + } + + @Override + public void send(HandshakeOutStream hos) throws IOException { + hos.putInt16(algorithmId); + hos.putInt24(uncompressedLength); + hos.putBytes24(compressedCert); + } + + @Override + public String toString() { + MessageFormat messageFormat = new MessageFormat( + """ + "CompressedCertificate": '{' + "algorithm": "{0}", + "uncompressed_length": {1} + "compressed_certificate_message": [ + {2} + ] + '}'""", + Locale.ENGLISH); + + HexDumpEncoder hexEncoder = new HexDumpEncoder(); + Object[] messageFields = { + CompressionAlgorithm.nameOf(algorithmId), + uncompressedLength, + Utilities.indent(hexEncoder.encode(compressedCert), " ") + }; + + return messageFormat.format(messageFields); + } + } + + /** + * The "CompressedCertificate" handshake message producer for TLS 1.3. + */ + private static final + class CompressedCertProducer implements HandshakeProducer { + + // Prevent instantiation of this class. + private CompressedCertProducer() { + // blank + } + + // Note this is a special producer, which can only be called from + // the CertificateMessage producer. The input 'message' parameter + // represents the Certificate handshake message. + @Override + public byte[] produce(ConnectionContext context, + HandshakeMessage message) throws IOException { + // The producing happens in handshake context only. + HandshakeContext hc = (HandshakeContext) context; + + // Compress the Certificate message. + HandshakeOutStream hos = new HandshakeOutStream(null); + message.send(hos); + byte[] certMsg = hos.toByteArray(); + byte[] compressedCertMsg; + + // First byte is the size of certificate_request_context which + // should be random if present. Don't cache a randomized message. 
+ if (certMsg[0] != 0) { + compressedCertMsg = hc.certDeflater.getValue().apply(certMsg); + } else { + Cache cache = + hc.sslContext.getCompCertCache(); + CompCertCacheKey key = new CompCertCacheKey( + new EqualByteArray(certMsg), hc.certDeflater.getKey()); + compressedCertMsg = cache.get(key); + + if (compressedCertMsg == null) { + compressedCertMsg = + hc.certDeflater.getValue().apply(certMsg); + + if (SSLLogger.isOn() + && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.fine("Caching CompressedCertificate message"); + } + + cache.put(key, compressedCertMsg); + } + } + + if (compressedCertMsg == null || compressedCertMsg.length == 0) { + throw hc.conContext.fatal(Alert.HANDSHAKE_FAILURE, + "No compressed Certificate data"); + } + + CompressedCertMessage ccm = new CompressedCertMessage(hc, + hc.certDeflater.getKey(), certMsg.length, + compressedCertMsg); + + if (SSLLogger.isOn() && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.fine( + "Produced CompressedCertificate handshake message", + ccm); + } + + ccm.write(hc.handshakeOutput); + hc.handshakeOutput.flush(); + + // The handshake message has been delivered. + return null; + } + } + + /** + * The "CompressedCertificate" handshake message consumer for TLS 1.3. + */ + private static final class CompressedCertConsumer implements SSLConsumer { + + // Prevent instantiation of this class. + private CompressedCertConsumer() { + // blank + } + + @Override + public void consume(ConnectionContext context, + ByteBuffer message) throws IOException { + // The consuming happens in handshake context only. 
+ HandshakeContext hc = (HandshakeContext) context; + + // clean up this consumer + hc.handshakeConsumers.remove( + SSLHandshake.COMPRESSED_CERTIFICATE.id); + hc.handshakeConsumers.remove(SSLHandshake.CERTIFICATE.id); + + // Parse the handshake message + CompressedCertMessage ccm = new CompressedCertMessage(hc, message); + if (SSLLogger.isOn() && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.fine( + "Consuming CompressedCertificate handshake message", + ccm); + } + + // check the compression algorithm + Function inflater = + hc.certInflaters.get(ccm.algorithmId); + if (inflater == null) { + throw hc.conContext.fatal(Alert.BAD_CERTIFICATE, + "Unsupported certificate compression algorithm"); + } + + // decompress + byte[] certificateMessage = inflater.apply(ccm.compressedCert); + + // check the uncompressed length + if (certificateMessage == null || + certificateMessage.length != ccm.uncompressedLength) { + throw hc.conContext.fatal(Alert.BAD_CERTIFICATE, + "Improper certificate compression"); + } + + // Call the Certificate handshake message consumer. + CertificateMessage.t13HandshakeConsumer.consume(hc, + ByteBuffer.wrap(certificateMessage)); + } + } +} diff --git a/src/java.base/share/classes/sun/security/ssl/CompressionAlgorithm.java b/src/java.base/share/classes/sun/security/ssl/CompressionAlgorithm.java new file mode 100644 index 00000000000..3e9ef154424 --- /dev/null +++ b/src/java.base/share/classes/sun/security/ssl/CompressionAlgorithm.java @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.security.ssl; + +import java.io.ByteArrayOutputStream; +import java.util.AbstractMap; +import java.util.Map; +import java.util.function.Function; +import java.util.zip.Deflater; +import java.util.zip.Inflater; + +/** + * Enum for TLS certificate compression algorithms. + * This class also defines internally supported inflate/deflate functions. + */ + +enum CompressionAlgorithm { + ZLIB(1); // Currently only ZLIB is supported. + + final int id; + + CompressionAlgorithm(int id) { + this.id = id; + } + + static CompressionAlgorithm nameOf(int id) { + for (CompressionAlgorithm ca : CompressionAlgorithm.values()) { + if (ca.id == id) { + return ca; + } + } + + return null; + } + + // Return the size of a compression algorithms structure in TLS record. + static int sizeInRecord() { + return 2; + } + + // The size of compression/decompression buffer. 
+ private static final int BUF_SIZE = 1024; + + private static final Map> DEFLATORS = + Map.of(ZLIB.id, (input) -> { + try (Deflater deflater = new Deflater(); + ByteArrayOutputStream outputStream = + new ByteArrayOutputStream(input.length)) { + + deflater.setInput(input); + deflater.finish(); + byte[] buffer = new byte[BUF_SIZE]; + + while (!deflater.finished()) { + int compressedSize = deflater.deflate(buffer); + outputStream.write(buffer, 0, compressedSize); + } + + return outputStream.toByteArray(); + } catch (Exception e) { + if (SSLLogger.isOn() + && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.warning("Exception during certificate " + + "compression: ", e); + } + return null; + } + }); + + static Map.Entry> selectDeflater( + int[] compressionAlgorithmIds) { + + for (var entry : DEFLATORS.entrySet()) { + for (int id : compressionAlgorithmIds) { + if (id == entry.getKey()) { + return new AbstractMap.SimpleImmutableEntry<>(entry); + } + } + } + + return null; + } + + private static final Map> INFLATORS = + Map.of(ZLIB.id, (input) -> { + try (Inflater inflater = new Inflater(); + ByteArrayOutputStream outputStream = + new ByteArrayOutputStream(input.length)) { + + inflater.setInput(input); + byte[] buffer = new byte[BUF_SIZE]; + + while (!inflater.finished()) { + int decompressedSize = inflater.inflate(buffer); + + if (decompressedSize == 0) { + if (inflater.needsDictionary()) { + if (SSLLogger.isOn() + && SSLLogger.isOn( + SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.warning("Compressed input " + + "requires a dictionary"); + } + + return null; + } + + if (inflater.needsInput()) { + if (SSLLogger.isOn() + && SSLLogger.isOn( + SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.warning( + "Incomplete compressed input"); + } + + return null; + } + + // Else just break the loop. + break; + } + + outputStream.write(buffer, 0, decompressedSize); + + // Bound the memory usage. 
+ if (outputStream.size() + > SSLConfiguration.maxHandshakeMessageSize) { + if (SSLLogger.isOn() + && SSLLogger.isOn( + SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.warning("The size of the " + + "uncompressed certificate message " + + "exceeds maximum allowed size of " + + SSLConfiguration.maxHandshakeMessageSize + + " bytes; compressed size: " + + input.length); + } + + return null; + } + } + + return outputStream.toByteArray(); + } catch (Exception e) { + if (SSLLogger.isOn() + && SSLLogger.isOn(SSLLogger.Opt.HANDSHAKE)) { + SSLLogger.warning( + "Exception during certificate decompression: ", + e); + } + return null; + } + }); + + static Map> getInflaters() { + return INFLATORS; + } +} diff --git a/src/java.base/share/classes/sun/security/ssl/DHasKEM.java b/src/java.base/share/classes/sun/security/ssl/DHasKEM.java index 763013f280c..ef5c5b82f06 100644 --- a/src/java.base/share/classes/sun/security/ssl/DHasKEM.java +++ b/src/java.base/share/classes/sun/security/ssl/DHasKEM.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -101,7 +101,18 @@ public class DHasKEM implements KEMSpi { return new KEM.Encapsulated( sub(dh, from, to), pkEm, null); + + } catch (IllegalArgumentException e) { + // ECDH validation failure + // all-zero shared secret + throw e; + } catch (InvalidKeyException e) { + // Invalid peer public key + // Convert InvalidKeyException to an unchecked exception + throw new IllegalArgumentException("Invalid peer public key", + e); } catch (Exception e) { + // Unexpected internal failure throw new ProviderException("internal error", e); } } @@ -126,6 +137,11 @@ public class DHasKEM implements KEMSpi { PublicKey pkE = params.DeserializePublicKey(encapsulation); SecretKey dh = params.DH(algorithm, skR, pkE); return sub(dh, from, to); + + } catch (IllegalArgumentException e) { + // ECDH validation failure + // all-zero shared secret + throw e; } catch (IOException | InvalidKeyException e) { throw new DecapsulateException("Cannot decapsulate", e); } catch (Exception e) { @@ -248,7 +264,24 @@ public class DHasKEM implements KEMSpi { KeyAgreement ka = KeyAgreement.getInstance(kaAlgorithm); ka.init(skE); ka.doPhase(pkR, true); - return ka.generateSecret(alg); + SecretKey secret = ka.generateSecret(alg); + + // RFC 8446 section 7.4.2: checks for all-zero + // X25519/X448 shared secret. 
+ if (kaAlgorithm.equals("X25519") || + kaAlgorithm.equals("X448")) { + byte[] s = secret.getEncoded(); + for (byte b : s) { + if (b != 0) { + return secret; + } + } + // Trigger ILLEGAL_PARAMETER alert + throw new IllegalArgumentException( + "All-zero shared secret"); + } + + return secret; } } } diff --git a/src/java.base/share/classes/sun/security/ssl/HandshakeContext.java b/src/java.base/share/classes/sun/security/ssl/HandshakeContext.java index 54a2650c058..fbf2c00bbb4 100644 --- a/src/java.base/share/classes/sun/security/ssl/HandshakeContext.java +++ b/src/java.base/share/classes/sun/security/ssl/HandshakeContext.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ import java.security.AlgorithmConstraints; import java.security.CryptoPrimitive; import java.util.*; import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.function.Function; import javax.crypto.SecretKey; import javax.net.ssl.SNIServerName; import javax.net.ssl.SSLHandshakeException; @@ -131,6 +132,10 @@ abstract class HandshakeContext implements ConnectionContext { List peerRequestedSignatureSchemes; List peerRequestedCertSignSchemes; + // CertificateCompressionAlgorithm + Map> certInflaters; + Map.Entry> certDeflater; + // Known authorities X500Principal[] peerSupportedAuthorities = null; diff --git a/src/java.base/share/classes/sun/security/ssl/Hybrid.java b/src/java.base/share/classes/sun/security/ssl/Hybrid.java index e3e2cfa0b23..43634ce2f34 100644 --- a/src/java.base/share/classes/sun/security/ssl/Hybrid.java +++ b/src/java.base/share/classes/sun/security/ssl/Hybrid.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -355,7 +355,7 @@ public class Hybrid { int to, String algorithm) throws DecapsulateException { int expectedEncSize = engineEncapsulationSize(); if (encapsulation.length != expectedEncSize) { - throw new IllegalArgumentException( + throw new DecapsulateException( "Invalid key encapsulation message length: " + encapsulation.length + ", expected = " + expectedEncSize); diff --git a/src/java.base/share/classes/sun/security/ssl/KAKeyDerivation.java b/src/java.base/share/classes/sun/security/ssl/KAKeyDerivation.java index 39e82b50435..0ca197160a9 100644 --- a/src/java.base/share/classes/sun/security/ssl/KAKeyDerivation.java +++ b/src/java.base/share/classes/sun/security/ssl/KAKeyDerivation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ package sun.security.ssl; import sun.security.util.RawKeySpec; +import javax.crypto.DecapsulateException; import javax.crypto.KDF; import javax.crypto.KEM; import javax.crypto.KeyAgreement; @@ -35,6 +36,7 @@ import javax.net.ssl.SSLHandshakeException; import java.io.IOException; import java.security.GeneralSecurityException; +import java.security.InvalidKeyException; import java.security.KeyFactory; import java.security.PrivateKey; import java.security.Provider; @@ -47,6 +49,9 @@ import sun.security.util.KeyUtil; */ public class KAKeyDerivation implements SSLKeyDerivation { + // Algorithm used to derive TLS 1.3 shared secrets + private static final String t13KeyDerivationAlgorithm = + System.getProperty("jdk.tls.t13KeyDerivationAlgorithm", "Generic"); private final String algorithmName; private final HandshakeContext context; private final PrivateKey localPrivateKey; @@ -173,6 +178,9 @@ public class KAKeyDerivation implements SSLKeyDerivation { "encapsulation"); } + // All exceptions thrown during KEM encapsulation are mapped + // to TLS fatal alerts: + // illegal_parameter alert or internal_error alert. try { KeyFactory kf = (provider != null) ? 
KeyFactory.getInstance(algorithmName, provider) : @@ -189,8 +197,18 @@ public class KAKeyDerivation implements SSLKeyDerivation { SecretKey derived = deriveHandshakeSecret(algorithm, sharedSecret); return new KEM.Encapsulated(derived, enc.encapsulation(), null); - } catch (GeneralSecurityException gse) { - throw new SSLHandshakeException("Could not generate secret", gse); + } catch (IllegalArgumentException | InvalidKeyException e) { + // Peer validation failure + // ECDH all-zero shared secret (RFC 8446 section 7.4.2), + // ML-KEM encapsulation key check failure (FIPS-203 section 7.2) + throw context.conContext.fatal(Alert.ILLEGAL_PARAMETER, e); + } catch (GeneralSecurityException e) { + // Cryptographic failure, + // deriveHandshakeSecret failure. + throw context.conContext.fatal(Alert.INTERNAL_ERROR, e); + } catch (RuntimeException e) { + // unexpected provider/runtime failure + throw context.conContext.fatal(Alert.INTERNAL_ERROR, e); } finally { KeyUtil.destroySecretKeys(sharedSecret); } @@ -208,23 +226,41 @@ public class KAKeyDerivation implements SSLKeyDerivation { // Using KEM: called by the client after receiving the KEM // ciphertext (keyshare) from the server in ServerHello. // The client decapsulates it using its private key. - KEM kem = (provider != null) - ? KEM.getInstance(algorithmName, provider) - : KEM.getInstance(algorithmName); - var decapsulator = kem.newDecapsulator(localPrivateKey); - sharedSecret = decapsulator.decapsulate( - keyshare, 0, decapsulator.secretSize(), - "TlsPremasterSecret"); + + // All exceptions thrown during KEM decapsulation are mapped + // to TLS fatal alerts: + // illegal_parameter alert or internal_error alert. + try { + KEM kem = (provider != null) + ? 
KEM.getInstance(algorithmName, provider) + : KEM.getInstance(algorithmName); + var decapsulator = kem.newDecapsulator(localPrivateKey); + sharedSecret = decapsulator.decapsulate( + keyshare, 0, decapsulator.secretSize(), + t13KeyDerivationAlgorithm); + } catch (IllegalArgumentException | InvalidKeyException | + DecapsulateException e) { + // Peer validation failure + // ECDH all-zero shared secret (RFC 8446 section 7.4.2) + throw context.conContext.fatal(Alert.ILLEGAL_PARAMETER, e); + } catch (GeneralSecurityException e) { + // cryptographic failure + throw context.conContext.fatal(Alert.INTERNAL_ERROR, e); + } catch (RuntimeException e) { + // unexpected provider/runtime failure + throw context.conContext.fatal(Alert.INTERNAL_ERROR, e); + } } else { // Using traditional DH-style Key Agreement KeyAgreement ka = KeyAgreement.getInstance(algorithmName); ka.init(localPrivateKey); ka.doPhase(peerPublicKey, true); - sharedSecret = ka.generateSecret("TlsPremasterSecret"); + sharedSecret = ka.generateSecret(t13KeyDerivationAlgorithm); } return deriveHandshakeSecret(type, sharedSecret); } catch (GeneralSecurityException gse) { + // deriveHandshakeSecret() failure throw new SSLHandshakeException("Could not generate secret", gse); } finally { KeyUtil.destroySecretKeys(sharedSecret); diff --git a/src/java.base/share/classes/sun/security/ssl/QuicTLSEngineImpl.java b/src/java.base/share/classes/sun/security/ssl/QuicTLSEngineImpl.java index 74975fc1e5b..3384bf5f089 100644 --- a/src/java.base/share/classes/sun/security/ssl/QuicTLSEngineImpl.java +++ b/src/java.base/share/classes/sun/security/ssl/QuicTLSEngineImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -73,6 +73,7 @@ public final class QuicTLSEngineImpl implements QuicTLSEngine, SSLTransport { SSLHandshake.ENCRYPTED_EXTENSIONS.id, HANDSHAKE, SSLHandshake.CERTIFICATE_REQUEST.id, HANDSHAKE, SSLHandshake.CERTIFICATE.id, HANDSHAKE, + SSLHandshake.COMPRESSED_CERTIFICATE.id, HANDSHAKE, SSLHandshake.CERTIFICATE_VERIFY.id, HANDSHAKE, SSLHandshake.FINISHED.id, HANDSHAKE, SSLHandshake.NEW_SESSION_TICKET.id, ONE_RTT); @@ -660,7 +661,7 @@ public final class QuicTLSEngineImpl implements QuicTLSEngine, SSLTransport { } Alert alert = ((QuicEngineOutputRecord) conContext.outputRecord).getAlert(); - throw new QuicTransportException(alert.description, keySpace, 0, + throw new QuicTransportException(e.getMessage(), keySpace, 0, BASE_CRYPTO_ERROR + alert.id, e); } catch (IOException e) { throw new RuntimeException(e); diff --git a/src/java.base/share/classes/sun/security/ssl/SSLContextImpl.java b/src/java.base/share/classes/sun/security/ssl/SSLContextImpl.java index a1cc3ee112f..fdeb94bb496 100644 --- a/src/java.base/share/classes/sun/security/ssl/SSLContextImpl.java +++ b/src/java.base/share/classes/sun/security/ssl/SSLContextImpl.java @@ -34,7 +34,9 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; import javax.net.ssl.*; import sun.security.provider.certpath.AlgorithmChecker; +import sun.security.ssl.CompressedCertificate.CompCertCacheKey; import sun.security.ssl.SSLAlgorithmConstraints.SIGNATURE_CONSTRAINTS_MODE; +import sun.security.util.Cache; import sun.security.validator.Validator; /** @@ -73,6 +75,10 @@ public abstract class SSLContextImpl extends SSLContextSpi { private final ReentrantLock contextLock = new ReentrantLock(); + // Avoid compressing local certificates repeatedly for every handshake. 
+ private final Cache compCertCache = + Cache.newSoftMemoryCache(12); + SSLContextImpl() { ephemeralKeyManager = new EphemeralKeyManager(); clientCache = new SSLSessionContextImpl(false); @@ -225,6 +231,10 @@ public abstract class SSLContextImpl extends SSLContextSpi { return ephemeralKeyManager; } + Cache getCompCertCache() { + return compCertCache; + } + // Used for DTLS in server mode only. HelloCookieManager getHelloCookieManager(ProtocolVersion protocolVersion) { if (helloCookieManagerBuilder == null) { diff --git a/src/java.base/share/classes/sun/security/ssl/SSLExtension.java b/src/java.base/share/classes/sun/security/ssl/SSLExtension.java index aacb9420748..b13edc0359c 100644 --- a/src/java.base/share/classes/sun/security/ssl/SSLExtension.java +++ b/src/java.base/share/classes/sun/security/ssl/SSLExtension.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -270,6 +270,27 @@ enum SSLExtension implements SSLStringizer { // Extensions defined in RFC 7924 (TLS Cached Information Extension) CACHED_INFO (0x0019, "cached_info"), + // Extensions defined in RFC 8879 (TLS Certificate Compression) + CH_COMPRESS_CERTIFICATE (0x001B, "compress_certificate", + SSLHandshake.CLIENT_HELLO, + ProtocolVersion.PROTOCOLS_OF_13, + CompressCertExtension.chNetworkProducer, + CompressCertExtension.chOnLoadConsumer, + null, + null, + null, + CompressCertExtension.ccStringizer), + + CR_COMPRESS_CERTIFICATE (0x001B, "compress_certificate", + SSLHandshake.CERTIFICATE_REQUEST, + ProtocolVersion.PROTOCOLS_OF_13, + CompressCertExtension.crNetworkProducer, + CompressCertExtension.crOnLoadConsumer, + null, + null, + null, + CompressCertExtension.ccStringizer), + // Extensions defined in RFC 5077 (TLS Session Resumption without Server-Side State) CH_SESSION_TICKET (0x0023, "session_ticket", SSLHandshake.CLIENT_HELLO, diff --git a/src/java.base/share/classes/sun/security/ssl/SSLHandshake.java b/src/java.base/share/classes/sun/security/ssl/SSLHandshake.java index 7c78f6c3005..2c6b58bafa5 100644 --- a/src/java.base/share/classes/sun/security/ssl/SSLHandshake.java +++ b/src/java.base/share/classes/sun/security/ssl/SSLHandshake.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -370,7 +370,22 @@ enum SSLHandshake implements SSLConsumer, HandshakeProducer { }), // RFC 8879 - TLS Certificate Compression - COMPRESSED_CERTIFICATE ((byte)0x19, "compressed_certificate"), + @SuppressWarnings({"unchecked", "rawtypes"}) + COMPRESSED_CERTIFICATE ((byte)0x19, "compressed_certificate", + (new Map.Entry[] { + new SimpleImmutableEntry<>( + CompressedCertificate.handshakeConsumer, + ProtocolVersion.PROTOCOLS_OF_13 + ) + }), + (new Map.Entry[] { + // Note that the producing of this message is delegated to + // CertificateMessage producer. + new SimpleImmutableEntry<>( + CertificateMessage.t13HandshakeProducer, + ProtocolVersion.PROTOCOLS_OF_13 + ) + })), // RFC 8870 - Encrypted Key Transport for DTLS/Secure RTP EKT_KEY ((byte)0x1A, "ekt_key"), diff --git a/src/java.base/share/classes/sun/security/ssl/SSLSocketImpl.java b/src/java.base/share/classes/sun/security/ssl/SSLSocketImpl.java index f603cc22949..cef2f43526a 100644 --- a/src/java.base/share/classes/sun/security/ssl/SSLSocketImpl.java +++ b/src/java.base/share/classes/sun/security/ssl/SSLSocketImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,13 +28,13 @@ package sun.security.ssl; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; import java.net.SocketException; +import java.net.SocketTimeoutException; import java.nio.ByteBuffer; import java.util.List; import java.util.concurrent.TimeUnit; @@ -77,10 +77,10 @@ public final class SSLSocketImpl /** * ERROR HANDLING GUIDELINES * (which exceptions to throw and catch and which not to throw and catch) - * + *

* - if there is an IOException (SocketException) when accessing the * underlying Socket, pass it through - * + *

* - do not throw IOExceptions, throw SSLExceptions (or a subclass) */ @@ -454,12 +454,12 @@ public final class SSLSocketImpl if (!conContext.isNegotiated) { readHandshakeRecord(); } - } catch (InterruptedIOException iioe) { + } catch (SocketTimeoutException e) { if(resumable){ - handleException(iioe); + handleException(e); } else{ throw conContext.fatal(Alert.HANDSHAKE_FAILURE, - "Couldn't kickstart handshaking", iioe); + "Couldn't kickstart handshaking", e); } } catch (SocketException se) { handleException(se); @@ -1427,7 +1427,7 @@ public final class SSLSocketImpl return 0; } } catch (SSLException | - InterruptedIOException | SocketException se) { + SocketTimeoutException | SocketException se) { // Don't change exception in case of timeouts or interrupts // or SocketException. throw se; @@ -1486,7 +1486,7 @@ public final class SSLSocketImpl return buffer; } } catch (SSLException | - InterruptedIOException | SocketException se) { + SocketTimeoutException | SocketException se) { // Don't change exception in case of timeouts or interrupts // or SocketException. throw se; @@ -1677,40 +1677,23 @@ public final class SSLSocketImpl SSLLogger.warning("handling exception", cause); } - // Don't close the Socket in case of timeouts or interrupts. - if (cause instanceof InterruptedIOException) { - throw (IOException)cause; - } - - // need to perform error shutdown - boolean isSSLException = (cause instanceof SSLException); - Alert alert; - if (isSSLException) { - if (cause instanceof SSLHandshakeException) { - alert = Alert.HANDSHAKE_FAILURE; - } else { - alert = Alert.UNEXPECTED_MESSAGE; + throw switch (cause) { + // Don't close the Socket in case of timeouts. + case SocketTimeoutException ste -> ste; + // Send TLS alert with "fatal", then throw the socket exception. 
+ case SocketException se -> { + try { + throw conContext.fatal(Alert.UNEXPECTED_MESSAGE, se); + } catch (Exception _) { + } + yield se; } - } else { - if (cause instanceof IOException) { - alert = Alert.UNEXPECTED_MESSAGE; - } else { - // RuntimeException - alert = Alert.INTERNAL_ERROR; - } - } - - if (cause instanceof SocketException) { - try { - throw conContext.fatal(alert, cause); - } catch (Exception e) { - // Just delivering the fatal alert, re-throw the socket exception instead. - } - - throw (SocketException)cause; - } - - throw conContext.fatal(alert, cause); + case SSLHandshakeException sslhe -> + conContext.fatal(Alert.HANDSHAKE_FAILURE, sslhe); + case IOException ioe -> + conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe); + default -> conContext.fatal(Alert.INTERNAL_ERROR, cause); + }; } private Plaintext handleEOF(EOFException eofe) throws IOException { diff --git a/src/java.base/share/classes/sun/security/ssl/SSLSocketInputRecord.java b/src/java.base/share/classes/sun/security/ssl/SSLSocketInputRecord.java index fd9c4b171e7..fc3d9733150 100644 --- a/src/java.base/share/classes/sun/security/ssl/SSLSocketInputRecord.java +++ b/src/java.base/share/classes/sun/security/ssl/SSLSocketInputRecord.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -27,10 +27,10 @@ package sun.security.ssl; import java.io.EOFException; -import java.io.InterruptedIOException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.net.SocketTimeoutException; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; import java.util.ArrayList; @@ -180,7 +180,7 @@ final class SSLSocketInputRecord extends InputRecord implements SSLRecord { if (plaintext == null) { plaintext = decodeInputRecord(); } - } catch(InterruptedIOException e) { + } catch (SocketTimeoutException e) { // do not clean header and recordBody in case of Socket Timeout cleanInBuffer = false; throw e; diff --git a/src/java.base/share/classes/sun/security/ssl/SSLTransport.java b/src/java.base/share/classes/sun/security/ssl/SSLTransport.java index 50bff1e6d21..02551b4a8c1 100644 --- a/src/java.base/share/classes/sun/security/ssl/SSLTransport.java +++ b/src/java.base/share/classes/sun/security/ssl/SSLTransport.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,8 +27,8 @@ package sun.security.ssl; import java.io.EOFException; import java.io.IOException; -import java.io.InterruptedIOException; import java.net.SocketException; +import java.net.SocketTimeoutException; import java.nio.ByteBuffer; import javax.crypto.AEADBadTagException; import javax.crypto.BadPaddingException; @@ -138,7 +138,7 @@ interface SSLTransport { } catch (EOFException eofe) { // rethrow EOFException, the call will handle it if needed. throw eofe; - } catch (InterruptedIOException | SocketException se) { + } catch (SocketTimeoutException | SocketException se) { // don't close the Socket in case of timeouts or interrupts or SocketException. 
throw se; } catch (IOException ioe) { diff --git a/src/java.base/share/classes/sun/security/ssl/ServerHello.java b/src/java.base/share/classes/sun/security/ssl/ServerHello.java index 6980c216697..4bd2b0a059f 100644 --- a/src/java.base/share/classes/sun/security/ssl/ServerHello.java +++ b/src/java.base/share/classes/sun/security/ssl/ServerHello.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1461,6 +1461,11 @@ final class ServerHello { chc.handshakeConsumers.put( SSLHandshake.CERTIFICATE_REQUEST.id, SSLHandshake.CERTIFICATE_REQUEST); + if (chc.certInflaters != null && !chc.certInflaters.isEmpty()) { + chc.handshakeConsumers.put( + SSLHandshake.COMPRESSED_CERTIFICATE.id, + SSLHandshake.COMPRESSED_CERTIFICATE); + } chc.handshakeConsumers.put( SSLHandshake.CERTIFICATE.id, SSLHandshake.CERTIFICATE); diff --git a/src/java.base/share/classes/sun/security/ssl/X509KeyManagerCertChecking.java b/src/java.base/share/classes/sun/security/ssl/X509KeyManagerCertChecking.java index 6d26558847c..e0203128962 100644 --- a/src/java.base/share/classes/sun/security/ssl/X509KeyManagerCertChecking.java +++ b/src/java.base/share/classes/sun/security/ssl/X509KeyManagerCertChecking.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -512,13 +512,13 @@ abstract class X509KeyManagerCertChecking extends X509ExtendedKeyManager { chain[1].getPublicKey().getAlgorithm()); } else { // Check the signature algorithm of the certificate itself. 
- // Look for the "withRSA" in "SHA1withRSA", etc. + // Look for the "withEC" in "SHA256withECDSA", etc. X509Certificate issuer = (X509Certificate) chain[0]; String sigAlgName = issuer.getSigAlgName().toUpperCase(Locale.ENGLISH); String pattern = "WITH" + sigKeyAlgorithm.toUpperCase(Locale.ENGLISH); - return sigAlgName.endsWith(pattern); + return sigAlgName.contains(pattern); } } } diff --git a/src/java.base/share/classes/sun/security/util/KeyChoices.java b/src/java.base/share/classes/sun/security/util/KeyChoices.java index da3c611750e..00c4463d098 100644 --- a/src/java.base/share/classes/sun/security/util/KeyChoices.java +++ b/src/java.base/share/classes/sun/security/util/KeyChoices.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ import java.util.function.BiFunction; *

* This class supports reading, writing, and converting between them. *

- * Current code follows draft-ietf-lamps-kyber-certificates-11 and RFC 9881. + * Current code follows RFC 9935 and RFC 9881. */ public final class KeyChoices { diff --git a/src/java.base/share/classes/sun/security/util/KeyUtil.java b/src/java.base/share/classes/sun/security/util/KeyUtil.java index 942a91d61b8..e9dabdc5b06 100644 --- a/src/java.base/share/classes/sun/security/util/KeyUtil.java +++ b/src/java.base/share/classes/sun/security/util/KeyUtil.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -444,15 +444,16 @@ public final class KeyUtil { // is the LMS public key for the top-level tree. // Section 5.3: LMS public key is u32str(type) || u32str(otstype) || I || T[1] // Section 8: type is the numeric identifier for an LMS specification. - // This RFC defines 5 SHA-256 based types, value from 5 to 9. 
if (rawKey.length < 8) { throw new NoSuchAlgorithmException("Cannot decode public key"); } int num = ((rawKey[4] & 0xff) << 24) + ((rawKey[5] & 0xff) << 16) + ((rawKey[6] & 0xff) << 8) + (rawKey[7] & 0xff); return switch (num) { - // RFC 8554 only supports SHA_256 hash algorithm + // RFC 8554 only supports SHA_256 hash algorithms case 5, 6, 7, 8, 9 -> AlgorithmId.SHA256_oid; + // RFC 9858 supports SHAKE_256 hash algorithms + case 15, 16, 17, 18, 19 -> AlgorithmId.SHAKE256_512_oid; default -> throw new NoSuchAlgorithmException("Unknown LMS type: " + num); }; } catch (IOException e) { diff --git a/src/java.base/share/classes/sun/security/util/Password.java b/src/java.base/share/classes/sun/security/util/Password.java index 02cdcaf53fd..c1b44856c8b 100644 --- a/src/java.base/share/classes/sun/security/util/Password.java +++ b/src/java.base/share/classes/sun/security/util/Password.java @@ -29,6 +29,7 @@ import java.io.*; import java.nio.*; import java.nio.charset.*; import java.util.Arrays; +import java.util.Locale; import jdk.internal.access.SharedSecrets; import jdk.internal.io.JdkConsoleImpl; @@ -43,6 +44,22 @@ public class Password { return readPassword(in, false); } + private static final boolean ALLOW_STDIN; + static { + var value = SecurityProperties.getOverridableProperty( + "jdk.security.password.allowSystemIn"); + if (value != null) { + value = value.toLowerCase(Locale.ROOT); + } + ALLOW_STDIN = switch (value) { + case null -> true; // Default true now + case "true" -> true; + case "false" -> false; + default -> throw new IllegalArgumentException( + "Invalid jdk.security.password.allowSystemIn value: " + value); + }; + } + /** Reads user password from given input stream. 
* @param isEchoOn true if the password should be echoed on the screen */ @@ -66,19 +83,23 @@ public class Password { } consoleBytes = ConsoleHolder.convertToBytes(consoleEntered); in = new ByteArrayInputStream(consoleBytes); - } else if (in == System.in && VM.isBooted() - && System.in.available() == 0) { - // Warn if reading password from System.in but it's empty. - // This may be running in an IDE Run Window or in JShell, - // which acts like an interactive console and echoes the - // entered password. In this case, print a warning that - // the password might be echoed. If available() is not zero, - // it's more likely the input comes from a pipe, such as - // "echo password |" or "cat password_file |" where input - // will be silently consumed without echoing to the screen. - // Warn only if VM is booted and ResourcesMgr is available. - System.err.print(ResourcesMgr.getString - ("warning.input.may.be.visible.on.screen")); + } else if (in == System.in) { + if (!ALLOW_STDIN) { + throw new UnsupportedOperationException("Console not available." + + " Reading passwords from standard input is disallowed."); + } else if (VM.isBooted() && in.available() == 0) { + // Warn if reading password from System.in but it's empty. + // This may be running in an IDE Run Window or in JShell, + // which acts like an interactive console and echoes the + // entered password. In this case, print a warning that + // the password might be echoed. If available() is not zero, + // it's more likely the input comes from a pipe, such as + // "echo password |" or "cat password_file |" where input + // will be silently consumed without echoing to the screen. + // Warn only if VM is booted and ResourcesMgr is available. 
+ System.err.print(ResourcesMgr.getString + ("warning.input.may.be.visible.on.screen")); + } } } diff --git a/src/java.base/share/classes/sun/security/util/math/intpoly/IntegerPolynomial25519.java b/src/java.base/share/classes/sun/security/util/math/intpoly/IntegerPolynomial25519.java new file mode 100644 index 00000000000..c8f23da417e --- /dev/null +++ b/src/java.base/share/classes/sun/security/util/math/intpoly/IntegerPolynomial25519.java @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package sun.security.util.math.intpoly; + +import java.math.BigInteger; + +public final class IntegerPolynomial25519 extends IntegerPolynomial { + private static final int BITS_PER_LIMB = 51; + private static final int NUM_LIMBS = 5; + private static final int MAX_ADDS = 1; + public static final BigInteger MODULUS = evaluateModulus(); + private static final long CARRY_ADD = 1L << (BITS_PER_LIMB - 1); + private static final long LIMB_MASK = -1L >>> (64 - BITS_PER_LIMB); + + public static final IntegerPolynomial25519 ONE = + new IntegerPolynomial25519(); + + private IntegerPolynomial25519() { + super(BITS_PER_LIMB, NUM_LIMBS, MAX_ADDS, MODULUS); + } + + private static BigInteger evaluateModulus() { + BigInteger result = BigInteger.valueOf(2).pow(255); + result = result.subtract(BigInteger.valueOf(19)); + + return result; + } + + /** + * Carry from a range of limb positions. + * Override for performance (unnesting). + * + * @param limbs [in|out] the limbs for carry operation. + * @param start [in] the starting position of carry. + * @param end [in] the ending position of carry. + */ + @Override + protected void carry(long[] limbs, int start, int end) { + long carry; + + for (int i = start; i < end; i++) { + carry = (limbs[i] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[i] -= (carry << BITS_PER_LIMB); + limbs[i + 1] += carry; + } + } + + /** + * Carry operation for all limb positions. + * Override for performance (unroll and unnesting). + * + * @param limbs [in|out] the limbs for carry operation. 
+ */ + @Override + protected void carry(long[] limbs) { + long carry = (limbs[0] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[0] -= carry << BITS_PER_LIMB; + limbs[1] += carry; + + carry = (limbs[1] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[1] -= carry << BITS_PER_LIMB; + limbs[2] += carry; + + carry = (limbs[2] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[2] -= carry << BITS_PER_LIMB; + limbs[3] += carry; + + carry = (limbs[3] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[3] -= carry << BITS_PER_LIMB; + limbs[4] += carry; + } + + /** + * Multiply limbs by scalar value. + * Superclass assumes that limb primitive radix > (bits per limb * 2) + * + * @param a [in|out] the limbs to multiply a carry operation. 'a' is + * assumed to be reduced. + * @param b [in] the scalar value to be multiplied with the limbs. + */ + @Override + protected void multByInt(long[] a, long b) { + long aa0 = a[0]; + long aa1 = a[1]; + long aa2 = a[2]; + long aa3 = a[3]; + long aa4 = a[4]; + + long bb0 = b; + + final long shift1 = 64 - BITS_PER_LIMB; + final long shift2 = BITS_PER_LIMB; + + long d0; // low digit from multiplication + long dd0; // high digit from multiplication + // multiplication result digits for each column + long c0, c1, c2, c3, c4, c5; + + // Row 0 - multiply by aa0 + d0 = aa0 * bb0; + dd0 = Math.multiplyHigh(aa0, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + c0 = d0; + c1 = dd0; + + // Row 1 - multiply by aa1 + d0 = aa1 * bb0; + dd0 = Math.multiplyHigh(aa1, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + c1 += d0; + c2 = dd0; + + // Row 2 - multiply by aa2 + d0 = aa2 * bb0; + dd0 = Math.multiplyHigh(aa2, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + c2 += d0; + c3 = dd0; + + // Row 3 - multiply by aa3 + d0 = aa3 * bb0; + dd0 = Math.multiplyHigh(aa3, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + c3 += d0; + c4 = dd0; + + // Row 4 - multiply by aa4 + d0 = aa4 * bb0; + dd0 = Math.multiplyHigh(aa4, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + c4 
+= d0; + c5 = dd0; + + // Perform pseudo-Mersenne reduction + a[0] = c0 + (19 * c5); + + a[1] = c1; + a[2] = c2; + a[3] = c3; + a[4] = c4; + + reduce(a); + } + + /** + * Carry in all positions and reduce high order limb. + * + * @param limbs [in|out] the limbs to carry and reduce. + */ + protected void reduce(long[] limbs) { + long carry = (limbs[3] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[3] -= carry << BITS_PER_LIMB; + limbs[4] += carry; + + carry = (limbs[4] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[4] -= carry << BITS_PER_LIMB; + + limbs[0] += 19 * carry; + + carry = (limbs[0] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[0] -= carry << BITS_PER_LIMB; + limbs[1] += carry; + + carry = (limbs[1] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[1] -= carry << BITS_PER_LIMB; + limbs[2] += carry; + + carry = (limbs[2] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[2] -= carry << BITS_PER_LIMB; + limbs[3] += carry; + + carry = (limbs[3] + CARRY_ADD) >> BITS_PER_LIMB; + limbs[3] -= carry << BITS_PER_LIMB; + limbs[4] += carry; + } + + /** + * Reduces digit 'v' at limb position 'i' to a lower limb. + * + * @param limbs [in|out] the limbs to reduce in. + * @param v [in] the digit to reduce to the lower limb. + * @param i [in] the limbs to reduce from. + */ + protected void reduceIn(long[] limbs, long v, int i) { + limbs[i - NUM_LIMBS] += 19 * v; + } + + /** + * Carry from high order limb and reduce to the lower order limb. Assumed + * to be called two times to propagate the carries. + * + * @param limbs [in|out] the limbs to fully carry and reduce. + */ + protected void finalCarryReduceLast(long[] limbs) { + long carry = limbs[4] >> BITS_PER_LIMB; + + limbs[4] -= carry << BITS_PER_LIMB; + limbs[0] += 19 * carry; + } + + /** + * Multiply two limbs using a high/low digit technique that allows for + * larger limb sizes. It is assumed that both limbs have already been + * reduced. + * + * @param a [in] the limb operand to multiply. + * @param b [in] the limb operand to multiply. 
+ * @param r [out] the product of the limbs operands that is fully reduced. + */ + protected void mult(long[] a, long[] b, long[] r) { + long aa0 = a[0]; + long aa1 = a[1]; + long aa2 = a[2]; + long aa3 = a[3]; + long aa4 = a[4]; + + long bb0 = b[0]; + long bb1 = b[1]; + long bb2 = b[2]; + long bb3 = b[3]; + long bb4 = b[4]; + + final long shift1 = 64 - BITS_PER_LIMB; + final long shift2 = BITS_PER_LIMB; + + long d0, d1, d2, d3, d4; // low digits from multiplication + long dd0, dd1, dd2, dd3, dd4; // high digits from multiplication + // multiplication result digits for each column + long c0, c1, c2, c3, c4, c5, c6, c7, c8, c9; + + // Row 0 - multiply by aa0 + d0 = aa0 * bb0; + dd0 = Math.multiplyHigh(aa0, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + d1 = aa0 * bb1; + dd1 = Math.multiplyHigh(aa0, bb1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa0 * bb2; + dd2 = Math.multiplyHigh(aa0, bb2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa0 * bb3; + dd3 = Math.multiplyHigh(aa0, bb3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa0 * bb4; + dd4 = Math.multiplyHigh(aa0, bb4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c0 = d0; + c1 = d1 + dd0; + c2 = d2 + dd1; + c3 = d3 + dd2; + c4 = d4 + dd3; + c5 = dd4; + + // Row 1 - multiply by aa1 + d0 = aa1 * bb0; + dd0 = Math.multiplyHigh(aa1, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + d1 = aa1 * bb1; + dd1 = Math.multiplyHigh(aa1, bb1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa1 * bb2; + dd2 = Math.multiplyHigh(aa1, bb2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa1 * bb3; + dd3 = Math.multiplyHigh(aa1, bb3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa1 * bb4; + dd4 = Math.multiplyHigh(aa1, bb4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c1 += d0; + c2 += d1 + dd0; + c3 += d2 + dd1; + c4 += d3 + dd2; + c5 += d4 + dd3; + c6 = dd4; + + // Row 2 - multiply by aa2 + d0 = aa2 * bb0; + dd0 = 
Math.multiplyHigh(aa2, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + d1 = aa2 * bb1; + dd1 = Math.multiplyHigh(aa2, bb1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa2 * bb2; + dd2 = Math.multiplyHigh(aa2, bb2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa2 * bb3; + dd3 = Math.multiplyHigh(aa2, bb3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa2 * bb4; + dd4 = Math.multiplyHigh(aa2, bb4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c2 += d0; + c3 += d1 + dd0; + c4 += d2 + dd1; + c5 += d3 + dd2; + c6 += d4 + dd3; + c7 = dd4; + + // Row 3 - multiply by aa3 + d0 = aa3 * bb0; + dd0 = Math.multiplyHigh(aa3, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + d1 = aa3 * bb1; + dd1 = Math.multiplyHigh(aa3, bb1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa3 * bb2; + dd2 = Math.multiplyHigh(aa3, bb2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa3 * bb3; + dd3 = Math.multiplyHigh(aa3, bb3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa3 * bb4; + dd4 = Math.multiplyHigh(aa3, bb4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c3 += d0; + c4 += d1 + dd0; + c5 += d2 + dd1; + c6 += d3 + dd2; + c7 += d4 + dd3; + c8 = dd4; + + // Row 4 - multiply by aa4 + d0 = aa4 * bb0; + dd0 = Math.multiplyHigh(aa4, bb0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + d1 = aa4 * bb1; + dd1 = Math.multiplyHigh(aa4, bb1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa4 * bb2; + dd2 = Math.multiplyHigh(aa4, bb2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa4 * bb3; + dd3 = Math.multiplyHigh(aa4, bb3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa4 * bb4; + dd4 = Math.multiplyHigh(aa4, bb4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c4 += d0; + c5 += d1 + dd0; + c6 += d2 + dd1; + c7 += d3 + dd2; + c8 += d4 + dd3; + c9 = dd4; + + // Perform pseudo-Mersenne reduction + r[0] = c0 + (19 * c5); + r[1] = c1 + (19 * c6); + r[2] = c2 + (19 * c7); 
+ r[3] = c3 + (19 * c8); + r[4] = c4 + (19 * c9); + + reduce(r); + } + + /** + * Takes a single limb and squares it using a high/low digit technique that + * allows for larger limb sizes. It is assumed that the limb input has + * already been reduced. + * + * @param a [in] the limb operand to square. + * @param r [out] the resulting square of the limb which is fully reduced. + */ + protected void square(long[] a, long[] r) { + long aa0 = a[0]; + long aa1 = a[1]; + long aa2 = a[2]; + long aa3 = a[3]; + long aa4 = a[4]; + + final long shift1 = 64 - BITS_PER_LIMB; + final long shift2 = BITS_PER_LIMB; + + long d0, d1, d2, d3, d4; // low digits from multiplication + long dd0, dd1, dd2, dd3, dd4; // high digits from multiplication + // multiplication result digits for each column + long c0, c1, c2, c3, c4, c5, c6, c7, c8, c9; + + // Row 0 - multiply by aa0 + d0 = aa0 * aa0; + dd0 = Math.multiplyHigh(aa0, aa0) << shift1 | (d0 >>> shift2); + d0 &= LIMB_MASK; + + d1 = aa0 * aa1; + dd1 = Math.multiplyHigh(aa0, aa1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa0 * aa2; + dd2 = Math.multiplyHigh(aa0, aa2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa0 * aa3; + dd3 = Math.multiplyHigh(aa0, aa3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa0 * aa4; + dd4 = Math.multiplyHigh(aa0, aa4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c0 = d0; + c1 = (d1 << 1) + dd0; + c2 = (d2 + dd1) << 1; + c3 = (d3 + dd2) << 1; + c4 = (d4 + dd3) << 1; + c5 = dd4 << 1; + + // Row 1 - multiply by aa1 + d1 = aa1 * aa1; + dd1 = Math.multiplyHigh(aa1, aa1) << shift1 | (d1 >>> shift2); + d1 &= LIMB_MASK; + + d2 = aa1 * aa2; + dd2 = Math.multiplyHigh(aa1, aa2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa1 * aa3; + dd3 = Math.multiplyHigh(aa1, aa3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa1 * aa4; + dd4 = Math.multiplyHigh(aa1, aa4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c2 += d1; + c3 += (d2 << 1) + dd1; + c4 += 
(d3 + dd2) << 1; + c5 += (d4 + dd3) << 1; + c6 = dd4 << 1; + + // Row 2 - multiply by aa2 + d2 = aa2 * aa2; + dd2 = Math.multiplyHigh(aa2, aa2) << shift1 | (d2 >>> shift2); + d2 &= LIMB_MASK; + + d3 = aa2 * aa3; + dd3 = Math.multiplyHigh(aa2, aa3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa2 * aa4; + dd4 = Math.multiplyHigh(aa2, aa4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c4 += d2; + c5 += (d3 << 1) + dd2; + c6 += (d4 + dd3) << 1; + c7 = dd4 << 1; + + // Row 3 - multiply by aa3 + d3 = aa3 * aa3; + dd3 = Math.multiplyHigh(aa3, aa3) << shift1 | (d3 >>> shift2); + d3 &= LIMB_MASK; + + d4 = aa3 * aa4; + dd4 = Math.multiplyHigh(aa3, aa4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c6 += d3; + c7 += (d4 << 1) + dd3; + c8 = dd4 << 1; + + // Row 4 - multiply by aa4 + d4 = aa4 * aa4; + dd4 = Math.multiplyHigh(aa4, aa4) << shift1 | (d4 >>> shift2); + d4 &= LIMB_MASK; + + c8 += d4; + c9 = dd4; + + // Perform pseudo-Mersenne reduction + r[0] = c0 + (19 * c5); + r[1] = c1 + (19 * c6); + r[2] = c2 + (19 * c7); + r[3] = c3 + (19 * c8); + r[4] = c4 + (19 * c9); + + reduce(r); + } +} diff --git a/src/java.base/share/classes/sun/util/locale/provider/LocaleResources.java b/src/java.base/share/classes/sun/util/locale/provider/LocaleResources.java index ac43b22a3bd..76b383c03e1 100644 --- a/src/java.base/share/classes/sun/util/locale/provider/LocaleResources.java +++ b/src/java.base/share/classes/sun/util/locale/provider/LocaleResources.java @@ -105,6 +105,9 @@ public class LocaleResources { // TimeZoneNamesBundle exemplar city prefix private static final String TZNB_EXCITY_PREFIX = "timezone.excity."; + // TimeZoneNamesBundle explicit metazone dst offset prefix + private static final String TZNB_METAZONE_DSTOFFSET_PREFIX = "metazone.dstoffset."; + // null singleton cache value private static final Object NULLOBJECT = new Object(); @@ -321,7 +324,8 @@ public class LocaleResources { if (Objects.isNull(data) || Objects.isNull(val = data.get())) { 
TimeZoneNamesBundle tznb = localeData.getTimeZoneNames(locale); - if (key.startsWith(TZNB_EXCITY_PREFIX)) { + if (key.startsWith(TZNB_EXCITY_PREFIX) || + key.startsWith(TZNB_METAZONE_DSTOFFSET_PREFIX)) { if (tznb.containsKey(key)) { val = tznb.getString(key); assert val instanceof String; @@ -378,7 +382,8 @@ public class LocaleResources { Set value = new LinkedHashSet<>(); Set tzIds = new HashSet<>(Arrays.asList(TimeZone.getAvailableIDs())); for (String key : keyset) { - if (!key.startsWith(TZNB_EXCITY_PREFIX)) { + if (!key.startsWith(TZNB_EXCITY_PREFIX) && + !key.startsWith(TZNB_METAZONE_DSTOFFSET_PREFIX)) { value.add(rb.getStringArray(key)); tzIds.remove(key); } diff --git a/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java b/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java index fd3d4965db3..6c684e176c8 100644 --- a/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java +++ b/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java @@ -37,7 +37,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.spi.TimeZoneNameProvider; import sun.util.calendar.ZoneInfo; import sun.util.cldr.CLDRLocaleProviderAdapter; -import static sun.util.locale.provider.LocaleProviderAdapter.Type; +import static sun.util.locale.provider.LocaleProviderAdapter.Type.CLDR; /** * Utility class that deals with the localized time zone names @@ -169,10 +169,22 @@ public final class TimeZoneNameUtility { * Returns the canonical ID for the given ID */ public static Optional canonicalTZID(String id) { - return ((CLDRLocaleProviderAdapter)LocaleProviderAdapter.forType(Type.CLDR)) + return ((CLDRLocaleProviderAdapter)LocaleProviderAdapter.forType(CLDR)) .canonicalTZID(id); } + /** + * {@return the explicit metazone DST offset for the specified time zone ID, if exists} + * @param tzid the time zone ID + */ + public static String explicitDstOffset(String tzid) { + return (String) 
(LocaleProviderAdapter.forType(CLDR) instanceof CLDRLocaleProviderAdapter ca ? + ca.getLocaleResources(Locale.ROOT) + .getTimeZoneNames("metazone.dstoffset." + + ca.canonicalTZID(tzid).orElse(tzid)) : + null); + } + private static String[] retrieveDisplayNamesImpl(String id, Locale locale) { LocaleServiceProviderPool pool = LocaleServiceProviderPool.getPool(TimeZoneNameProvider.class); diff --git a/src/java.base/share/classes/sun/util/resources/TimeZoneNamesBundle.java b/src/java.base/share/classes/sun/util/resources/TimeZoneNamesBundle.java index a30b84c6872..c5e95c8a404 100644 --- a/src/java.base/share/classes/sun/util/resources/TimeZoneNamesBundle.java +++ b/src/java.base/share/classes/sun/util/resources/TimeZoneNamesBundle.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,8 +43,6 @@ package sun.util.resources; import java.util.Map; import java.util.LinkedHashMap; import java.util.LinkedHashSet; -import java.util.MissingResourceException; -import java.util.Objects; import java.util.Set; /** diff --git a/src/java.base/share/conf/security/java.security b/src/java.base/share/conf/security/java.security index ef4d7285f51..976604b5cbc 100644 --- a/src/java.base/share/conf/security/java.security +++ b/src/java.base/share/conf/security/java.security @@ -1704,7 +1704,7 @@ com.sun.security.allowedAIALocations= # # PKCS #8 encoding format for newly created ML-KEM and ML-DSA private keys # -# draft-ietf-lamps-kyber-certificates-11 and RFC 9881 define three possible formats for a private key: +# RFC 9935 and RFC 9881 define three possible formats for a private key: # a seed (64 bytes for ML-KEM, 32 bytes for ML-DSA), an expanded private key, # or a sequence containing both. 
# @@ -1725,3 +1725,32 @@ com.sun.security.allowedAIALocations= # #jdk.mlkem.pkcs8.encoding = seed #jdk.mldsa.pkcs8.encoding = seed + +# +# Policy for reading passwords from System.in +# +# When Java needs to read a password, whether it's via a tool such as keytool or +# kinit, or by an API such as PasswordCallback with echo off, it normally reads +# directly from the console. If the console is not available, Java falls back +# to reading from the standard input stream ("System.in"), which typically +# represents a redirected file or an inter-process pipe. This fallback is not +# formally specified, and is not widely adopted by tools from other vendors. +# +# This security property determines whether passwords can be read from the +# standard input stream when a console is not available. The value can be set +# to either "true" or "false". If the value is set to "false", attempting +# to read passwords from the standard input stream without a console will +# throw an exception. The default value is "true". This default may change +# in a future release. +# +# If a system property of the same name is also specified, it supersedes the +# security property value defined here. +# +# Note: This property applies only to password reading from the standard input +# stream. It does not affect other supported password sources. For example, the +# JAAS KeyStoreLoginModule allows a password to be read from the user-specified +# "keyStorePasswordURL" option. The keytool and jarsigner commands also support +# options such as "-storepass:env" and "-storepass:file" that read passwords +# from an environment variable or a file. 
+# +#jdk.security.password.allowSystemIn = true diff --git a/src/java.base/share/data/tzdata/VERSION b/src/java.base/share/data/tzdata/VERSION index ce25e7653b0..a2974d757c8 100644 --- a/src/java.base/share/data/tzdata/VERSION +++ b/src/java.base/share/data/tzdata/VERSION @@ -21,4 +21,4 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -tzdata2025c +tzdata2026a diff --git a/src/java.base/share/data/tzdata/etcetera b/src/java.base/share/data/tzdata/etcetera index 41660b05dba..9b030fdb8d4 100644 --- a/src/java.base/share/data/tzdata/etcetera +++ b/src/java.base/share/data/tzdata/etcetera @@ -43,7 +43,8 @@ # which load the "UTC" file to handle seconds properly. Zone Etc/UTC 0 - UTC -# Functions like gmtime load the "GMT" file to handle leap seconds properly. +# If leap second support is enabled, functions like gmtime +# load the "GMT" file to handle leap seconds properly. # Vanguard section, which works with most .zi parsers. #Zone GMT 0 - GMT # Rearguard section, for TZUpdater 2.3.2 and earlier. diff --git a/src/java.base/share/data/tzdata/europe b/src/java.base/share/data/tzdata/europe index b82ca6f67bb..a66d40834cd 100644 --- a/src/java.base/share/data/tzdata/europe +++ b/src/java.base/share/data/tzdata/europe @@ -1064,9 +1064,19 @@ Zone Atlantic/Faroe -0:27:04 - LMT 1908 Jan 11 # Tórshavn # Greenland # -# From Paul Eggert (2004-10-31): +# From Paul Eggert (2026-01-22): +# During World War II, Greenland was effectively independent of Denmark and +# observed daylight saving time. TIME, volume 37, page 23 (1941-04-21) +# says, +# "Penfield and West made their way to the U.S.'s most northerly consulate. +# They were astonished to find that Greenlanders, with almost 24 hours of +# sunlight a day during the summer, have daylight saving time." +# As the details are unknown they are omitted from the data for now. 
+# # During World War II, Germany maintained secret manned weather stations in # East Greenland and Franz Josef Land, but we don't know their time zones. +# Also, they're likely out of scope for the database +# as we lack resources to track every bit of military activity. # My source for this is Wilhelm Dege's book mentioned under Svalbard. # # From Paul Eggert (2017-12-10): @@ -1980,7 +1990,6 @@ Zone Europe/Malta 0:58:04 - LMT 1893 Nov 2 # Valletta # From Stepan Golosunov (2016-03-07): # the act of the government of the Republic of Moldova Nr. 132 from 1990-05-04 -# http://lex.justice.md/viewdoc.php?action=view&view=doc&id=298782&lang=2 # ... says that since 1990-05-06 on the territory of the Moldavian SSR # time would be calculated as the standard time of the second time belt # plus one hour of the "summer" time. To implement that clocks would be @@ -2035,9 +2044,61 @@ Zone Europe/Malta 0:58:04 - LMT 1893 Nov 2 # Valletta # says the 2014-03-30 spring-forward transition was at 02:00 local time. # Guess that since 1997 Moldova has switched one hour before the EU. +# From Heitor David Pinto (2026-02-22): +# Soviet Moldovan resolution 132 of 1990 defined the summer time period from +# the last Sunday in March at 2:00 to the last Sunday in September at 3:00, +# matching the dates used in most of Europe at the time: +# https://web.archive.org/web/20211107050832/http://lex.justice.md/viewdoc.php?action=view&view=doc&id=298782&lang=1 +# +# It seems that in 1996 Moldova changed the end date to October like most of +# Europe, but kept the transitions at 2:00 and 3:00 rather than 1:00 UTC, +# which would have been locally 3:00 and 4:00.... 
+# +# The notices in the Moldovan government website and broadcaster showed the +# transitions at 2:00 and 3:00 until 2021: +# 2015 https://old.gov.md/en/node/7304 +# 2016 https://old.gov.md/en/node/12587 +# 2017 https://old.gov.md/en/node/20654 +# 2017 https://old.gov.md/en/content/moldova-upholds-winter-time-night-28-29-october +# 2018 https://old.gov.md/en/content/moldova-switch-summer-time +# 2018 https://old.gov.md/en/content/cabinet-ministers-informs-about-switch-winter-time-28-october +# 2019 https://old.gov.md/en/content/moldova-switch-summer-time-31-march +# 2019 https://old.gov.md/en/node/31122 +# 2020 https://old.gov.md/en/node/32771 +# 2020 https://old.gov.md/en/node/34497 +# 2021 https://trm.md/ro/social/moldova-trece-in-aceasta-noapte-la-ora-de-vara +# 2021 https://trm.md/en/social/republica-moldova-trece-la-ora-de-iarna1 +# +# However, since 2022, the notices showed the transitions at 3:00 and 4:00, +# matching the EU rule at 1:00 UTC: +# 2022 https://trm.md/en/social/in-acest-weekend-republica-moldova-trece-la-ora-de-vara +# 2022 https://old.gov.md/en/content/moldova-switch-winter-time +# 2023 https://moldova1.md/p/6587/ora-de-vara-2023-cum-schimbam-acele-ceasornicelor-si-cand-trecem-la-ora-de-vara +# 2023 https://old.gov.md/en/node/46662 +# 2024 https://moldova1.md/p/26535/republica-moldova-trece-la-ora-de-vara-in-acest-weekend +# 2024 https://moldova1.md/p/37768/republica-moldova-trece-in-aceasta-noapte-la-ora-de-iarna +# 2025 https://moldova1.md/p/46349/republica-moldova-trece-la-ora-de-vara-pe-30-martie-cum-ne-afecteaza-si-ce-recomanda-medicii +# 2025 https://moldova1.md/p/60469/republica-moldova-trece-la-ora-de-iarna-ceasurile-se-dau-inapoi-cu-o-ora +# +# It seems that the changes to the end date and transition times were just +# done in practice without formally changing the resolution. 
In late 2025, the +# government said that the Soviet resolution was still in force, and proposed +# a new resolution to replace it and formally establish the EU rule: +# ... based on the notices, it seems that in practice Moldova already +# uses the EU rule since 2022. This was also the year when Moldova applied to +# join the EU. +# +# From Robert Bastian (2026-02-26): +# This has been approved and published in the government gazette: +# https://monitorul.gov.md/ro/monitorul/view/pdf/3234/part/2#page=27 +# +# From Paul Eggert (2026-02-24): +# Also see Svetlana Rudenko, "Moldova abandons the 'Soviet era'", Logos Press, +# 2026-02-21 . + # Rule NAME FROM TO - IN ON AT SAVE LETTER/S -Rule Moldova 1997 max - Mar lastSun 2:00 1:00 S -Rule Moldova 1997 max - Oct lastSun 3:00 0 - +Rule Moldova 1997 2021 - Mar lastSun 2:00 1:00 S +Rule Moldova 1997 2021 - Oct lastSun 3:00 0 - # Zone NAME STDOFF RULES FORMAT [UNTIL] Zone Europe/Chisinau 1:55:20 - LMT 1880 @@ -2050,7 +2111,8 @@ Zone Europe/Chisinau 1:55:20 - LMT 1880 2:00 Russia EE%sT 1992 2:00 E-Eur EE%sT 1997 # See Romania commentary for the guessed 1997 transition to EU rules. - 2:00 Moldova EE%sT + 2:00 Moldova EE%sT 2022 + 2:00 EU EE%sT # Poland @@ -2436,7 +2498,7 @@ Zone Atlantic/Madeira -1:07:36 - LMT 1884 # Funchal # Nine O'clock # (1998-10-23) reports that the switch occurred at # 04:00 local time in fall 1998. For lack of better info, -# assume that Romania and Moldova switched to EU rules in 1997, +# assume that Romania switched to EU rules in 1997, # the same year as Bulgaria. # # Rule NAME FROM TO - IN ON AT SAVE LETTER/S diff --git a/src/java.base/share/data/tzdata/leapseconds b/src/java.base/share/data/tzdata/leapseconds index 9426b40f07e..d431a7d3607 100644 --- a/src/java.base/share/data/tzdata/leapseconds +++ b/src/java.base/share/data/tzdata/leapseconds @@ -93,7 +93,7 @@ Leap 2016 Dec 31 23:59:60 + S # Any additional leap seconds will come after this. 
# This Expires line is commented out for now, # so that pre-2020a zic implementations do not reject this file. -#Expires 2026 Jun 28 00:00:00 +#Expires 2026 Dec 28 00:00:00 # Here are POSIX timestamps for the data in this file. # "#updated" gives the last time the leap seconds data changed @@ -102,8 +102,8 @@ Leap 2016 Dec 31 23:59:60 + S # "#expires" gives the first time this file might be wrong; # if this file was derived from the IERS leap-seconds.list, # this is typically a bit less than one year after "updated". -#updated 1751846400 (2025-07-07 00:00:00 UTC) -#expires 1782604800 (2026-06-28 00:00:00 UTC) +#updated 1767698058 (2026-01-06 11:14:18 UTC) +#expires 1798416000 (2026-12-28 00:00:00 UTC) # Updated through IERS Bulletin C (https://hpiers.obspm.fr/iers/bul/bulc/bulletinc.dat) -# File expires on 28 June 2026 +# File expires on 28 December 2026 diff --git a/src/java.base/share/man/java.md b/src/java.base/share/man/java.md index 8cfccff5abe..18d64b3a4c2 100644 --- a/src/java.base/share/man/java.md +++ b/src/java.base/share/man/java.md @@ -2944,26 +2944,6 @@ they're used. the configuration of the computer (RAM and CPU). By default, the option is disabled and the heap sizes are configured less aggressively. -[`-XX:+NeverActAsServerClassMachine`]{#-XX__NeverActAsServerClassMachine} -: Enable the "Client VM emulation" mode which only uses the C1 JIT compiler, - a 32Mb CodeCache and the Serial GC. The maximum amount of memory that the - JVM may use is set to 1GB by default. The string "emulated-client" is added - to the JVM version string. - - By default the flag is set to `true` only on Windows in 32-bit mode and - `false` in all other cases. 
- - The "Client VM emulation" mode will not be enabled if any of the following - flags are used on the command line: - - ``` - -XX:{+|-}TieredCompilation - -XX:CompilationMode=mode - -XX:TieredStopAtLevel=n - -XX:{+|-}EnableJVMCI - -XX:{+|-}UseJVMCICompiler - ``` - ## Obsolete Java Options These `java` options are still accepted but ignored, and a warning is issued @@ -2976,6 +2956,26 @@ when they're used. 396](https://openjdk.org/jeps/396) and made obsolete in JDK 17 by [JEP 403](https://openjdk.org/jeps/403). +[`-XX:+NeverActAsServerClassMachine`]{#-XX__NeverActAsServerClassMachine} +: Enabled the "Client VM emulation" mode, which used only the C1 JIT compiler, + a 32Mb CodeCache, and the Serial GC. The maximum amount of memory that the + JVM could use was set to 1GB by default. The string "emulated-client" was added + to the JVM version string. + + By default the flag was set to `true` only on Windows in 32-bit mode and + `false` in all other cases. + + The "Client VM emulation" mode was not enabled if any of the following + flags were used on the command line: + + ``` + -XX:{+|-}TieredCompilation + -XX:CompilationMode=mode + -XX:TieredStopAtLevel=n + -XX:{+|-}EnableJVMCI + -XX:{+|-}UseJVMCICompiler + ``` + ## Removed Java Options No documented java options have been removed in JDK @@VERSION_SPECIFICATION@@. 
diff --git a/src/java.base/share/native/include/classfile_constants.h.template b/src/java.base/share/native/include/classfile_constants.h.template index fb022ec1fd4..4f96a0673ef 100644 --- a/src/java.base/share/native/include/classfile_constants.h.template +++ b/src/java.base/share/native/include/classfile_constants.h.template @@ -111,7 +111,7 @@ enum { JVM_CONSTANT_InvokeDynamic = 18, JVM_CONSTANT_Module = 19, JVM_CONSTANT_Package = 20, - JVM_CONSTANT_ExternalMax = 20 + JVM_CONSTANT_ExternalMax = 20 }; /* JVM_CONSTANT_MethodHandle subtypes */ diff --git a/src/java.base/share/native/libfallbackLinker/fallbackLinker.c b/src/java.base/share/native/libfallbackLinker/fallbackLinker.c index 3f57a2b97cf..7519efd93bb 100644 --- a/src/java.base/share/native/libfallbackLinker/fallbackLinker.c +++ b/src/java.base/share/native/libfallbackLinker/fallbackLinker.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -87,14 +87,32 @@ Java_jdk_internal_foreign_abi_fallback_LibFallback_ffi_1get_1struct_1offsets(JNI return ffi_get_struct_offsets((ffi_abi) abi, jlong_to_ptr(type), jlong_to_ptr(offsets)); } -static void do_capture_state(int32_t* value_ptr, int captured_state_mask) { - // keep in synch with jdk.internal.foreign.abi.CapturableState - enum PreservableValues { - NONE = 0, - GET_LAST_ERROR = 1, - WSA_GET_LAST_ERROR = 1 << 1, - ERRNO = 1 << 2 - }; +// keep in synch with jdk.internal.foreign.abi.CapturableState +enum PreservableValues { + NONE = 0, + GET_LAST_ERROR = 1, + WSA_GET_LAST_ERROR = 1 << 1, + ERRNO = 1 << 2 +}; + +static void do_capture_state_pre(int32_t* value_ptr, int captured_state_mask) { +#ifdef _WIN64 + if (captured_state_mask & GET_LAST_ERROR) { + SetLastError(*value_ptr); + } + value_ptr++; + if (captured_state_mask & WSA_GET_LAST_ERROR) { + WSASetLastError(*value_ptr); + *value_ptr = WSAGetLastError(); + } + value_ptr++; +#endif + if (captured_state_mask & ERRNO) { + errno = *value_ptr; + } +} + +static void do_capture_state_post(int32_t* value_ptr, int captured_state_mask) { #ifdef _WIN64 if (captured_state_mask & GET_LAST_ERROR) { *value_ptr = GetLastError(); @@ -142,10 +160,15 @@ Java_jdk_internal_foreign_abi_fallback_LibFallback_doDowncall(JNIEnv* env, jclas } } + if (captured_state_mask != 0) { + // Copy the contents of the capture state buffer into thread local + do_capture_state_pre(captured_state_addr, captured_state_mask); + } + ffi_call(jlong_to_ptr(cif), jlong_to_ptr(fn), jlong_to_ptr(rvalue), jlong_to_ptr(avalues)); if (captured_state_mask != 0) { - do_capture_state(captured_state_addr, captured_state_mask); + do_capture_state_post(captured_state_addr, captured_state_mask); } if (heapBases != NULL) { diff --git a/src/java.base/share/native/libverify/check_code.c b/src/java.base/share/native/libverify/check_code.c index 5a8f50cd0a0..4830fedb97b 100644 --- 
a/src/java.base/share/native/libverify/check_code.c +++ b/src/java.base/share/native/libverify/check_code.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2162,8 +2162,7 @@ pop_stack(context_type *context, unsigned int inumber, stack_info_type *new_stac break; if ( (GET_ITEM_TYPE(top_type) == ITEM_NewObject || (GET_ITEM_TYPE(top_type) == ITEM_InitObject)) - && ((opcode == JVM_OPC_astore) || (opcode == JVM_OPC_aload) - || (opcode == JVM_OPC_ifnull) || (opcode == JVM_OPC_ifnonnull))) + && ((opcode == JVM_OPC_astore) || (opcode == JVM_OPC_aload))) break; /* The 2nd edition VM of the specification allows field * initializations before the superclass initializer, diff --git a/src/java.base/unix/classes/java/lang/ProcessImpl.java b/src/java.base/unix/classes/java/lang/ProcessImpl.java index 00b51fb3389..d9a4547848f 100644 --- a/src/java.base/unix/classes/java/lang/ProcessImpl.java +++ b/src/java.base/unix/classes/java/lang/ProcessImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,8 +82,7 @@ final class ProcessImpl extends Process { private static enum LaunchMechanism { // order IS important! 
FORK, - POSIX_SPAWN, - VFORK + POSIX_SPAWN } /** @@ -98,29 +97,16 @@ final class ProcessImpl extends Process { try { // Should be value of a LaunchMechanism enum - LaunchMechanism lm = LaunchMechanism.valueOf(s.toUpperCase(Locale.ROOT)); - switch (OperatingSystem.current()) { - case LINUX: { - // All options are valid for Linux, but VFORK is deprecated and results - // in a warning - if (lm == LaunchMechanism.VFORK) { - System.err.println("VFORK MODE DEPRECATED"); - System.err.println(""" - The VFORK launch mechanism has been deprecated for being dangerous. - It will be removed in a future java version. Either remove the - jdk.lang.Process.launchMechanism property (preferred) or use FORK mode - instead (-Djdk.lang.Process.launchMechanism=FORK). - """); - } - return lm; - } - case AIX: - case MACOS: - if (lm != LaunchMechanism.VFORK) { - return lm; // All but VFORK are valid - } - break; + String launchMechanism = s.toUpperCase(Locale.ROOT); + if (launchMechanism.equals("VFORK") && OperatingSystem.isLinux()) { + launchMechanism = "FORK"; + System.err.println(String.format(""" + The VFORK launch mechanism has been removed. Switching to %s instead. + Please remove the jdk.lang.Process.launchMechanism property (preferred) + or use FORK mode instead (-Djdk.lang.Process.launchMechanism=FORK).%n + """, launchMechanism)); } + return LaunchMechanism.valueOf(launchMechanism); } catch (IllegalArgumentException e) { } @@ -266,7 +252,6 @@ final class ProcessImpl extends Process { *

      *   1 - fork(2) and exec(2)
      *   2 - posix_spawn(3P)
-     *   3 - vfork(2) and exec(2)
      * 
* @param fds an array of three file descriptors. * Indexes 0, 1, and 2 correspond to standard input, diff --git a/src/java.base/unix/classes/sun/nio/fs/UnixConstants.java.template b/src/java.base/unix/classes/sun/nio/fs/UnixConstants.java.template index 6823833582f..7a9a22ac40d 100644 --- a/src/java.base/unix/classes/sun/nio/fs/UnixConstants.java.template +++ b/src/java.base/unix/classes/sun/nio/fs/UnixConstants.java.template @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -173,7 +173,5 @@ class UnixConstants { #ifdef __linux__ // advice flags used with posix_fadvise(2) static final int PREFIX_POSIX_FADV_SEQUENTIAL = POSIX_FADV_SEQUENTIAL; - static final int PREFIX_POSIX_FADV_NOREUSE = POSIX_FADV_NOREUSE; - static final int PREFIX_POSIX_FADV_WILLNEED = POSIX_FADV_WILLNEED; #endif } diff --git a/src/java.base/unix/native/jspawnhelper/jspawnhelper.c b/src/java.base/unix/native/jspawnhelper/jspawnhelper.c index d2302d0c2e7..2523fb30acc 100644 --- a/src/java.base/unix/native/jspawnhelper/jspawnhelper.c +++ b/src/java.base/unix/native/jspawnhelper/jspawnhelper.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,14 +26,16 @@ #include #include #include +#include +#include #include #include #include #include #include -#include #include "childproc.h" +#include "childproc_errorcodes.h" extern int errno; @@ -41,36 +43,34 @@ extern int errno; void *mptr; \ mptr = malloc (Y); \ if (mptr == 0) { \ - error (fdout, ERR_MALLOC); \ + sendErrorCodeAndExit (fdout, ESTEP_JSPAWN_ALLOC_FAILED, (int)Y, errno); \ } \ X = mptr; \ } -#define ERR_MALLOC 1 -#define ERR_PIPE 2 -#define ERR_ARGS 3 - #ifndef VERSION_STRING #error VERSION_STRING must be defined #endif -void error (int fd, int err) { - if (write (fd, &err, sizeof(err)) != sizeof(err)) { - /* Not sure what to do here. I have no one to speak to. */ - exit(0x80 + err); +/* Attempts to send an error code to the parent (which may or may not + * work depending on whether the fail pipe exists); then exits with an + * error code corresponding to the fail step. */ +void sendErrorCodeAndExit(int failpipe_fd, int step, int hint, int errno_) { + errcode_t errcode; + buildErrorCode(&errcode, step, hint, errno_); + if (failpipe_fd == -1 || !sendErrorCode(failpipe_fd, errcode)) { + /* Write error code to stdout, in the hope someone reads this. 
*/ + printf("jspawnhelper fail: " ERRCODE_FORMAT "\n", ERRCODE_FORMAT_ARGS(errcode)); } - exit (1); + exit(exitCodeFromErrorCode(errcode)); } -void shutItDown() { - fprintf(stdout, "jspawnhelper version %s\n", VERSION_STRING); - fprintf(stdout, "This command is not for general use and should "); - fprintf(stdout, "only be run as the result of a call to\n"); - fprintf(stdout, "ProcessBuilder.start() or Runtime.exec() in a java "); - fprintf(stdout, "application\n"); - fflush(stdout); - _exit(1); -} +static const char* usageErrorText = + "jspawnhelper version " VERSION_STRING "\n" + "This command is not for general use and should " + "only be run as the result of a call to\n" + "ProcessBuilder.start() or Runtime.exec() in a java " + "application\n"; /* * read the following off the pipefd @@ -84,22 +84,31 @@ void initChildStuff (int fdin, int fdout, ChildStuff *c) { int bufsize, offset=0; int magic; int res; + const int step = ESTEP_JSPAWN_RCV_CHILDSTUFF_COMM_FAIL; + int substep = 0; res = readFully (fdin, &magic, sizeof(magic)); - if (res != 4 || magic != magicNumber()) { - error (fdout, ERR_PIPE); + if (res != 4) { + sendErrorCodeAndExit(fdout, step, substep, errno); + } + + substep ++; + if (magic != magicNumber()) { + sendErrorCodeAndExit(fdout, step, substep, errno); } #ifdef DEBUG jtregSimulateCrash(0, 5); #endif + substep ++; if (readFully (fdin, c, sizeof(*c)) != sizeof(*c)) { - error (fdout, ERR_PIPE); + sendErrorCodeAndExit(fdout, step, substep, errno); } + substep ++; if (readFully (fdin, &sp, sizeof(sp)) != sizeof(sp)) { - error (fdout, ERR_PIPE); + sendErrorCodeAndExit(fdout, step, substep, errno); } bufsize = sp.argvBytes + sp.envvBytes + @@ -107,8 +116,9 @@ void initChildStuff (int fdin, int fdout, ChildStuff *c) { ALLOC(buf, bufsize); + substep++; if (readFully (fdin, buf, bufsize) != bufsize) { - error (fdout, ERR_PIPE); + sendErrorCodeAndExit(fdout, step, substep, errno); } /* Initialize argv[] */ @@ -139,50 +149,63 @@ void initChildStuff (int fdin, 
int fdout, ChildStuff *c) { offset += sp.parentPathvBytes; } +#ifdef DEBUG +static void checkIsValid(int fd) { + if (!fdIsValid(fd)) { + puts(usageErrorText); + sendErrorCodeAndExit(-1, ESTEP_JSPAWN_INVALID_FD, fd, errno); + } +} +static void checkIsPipe(int fd) { + checkIsValid(fd); + if (!fdIsPipe(fd)) { + puts(usageErrorText); + sendErrorCodeAndExit(-1, ESTEP_JSPAWN_NOT_A_PIPE, fd, errno); + } +} +static void checkFileDescriptorSetup() { + checkIsValid(STDIN_FILENO); + checkIsValid(STDOUT_FILENO); + checkIsValid(STDERR_FILENO); + checkIsPipe(FAIL_FILENO); + checkIsPipe(CHILDENV_FILENO); +} +#endif // DEBUG + int main(int argc, char *argv[]) { ChildStuff c; - struct stat buf; - /* argv[1] contains the fd number to read all the child info */ - int r, fdinr, fdinw, fdout; #ifdef DEBUG jtregSimulateCrash(0, 4); #endif - if (argc != 3) { - fprintf(stdout, "Incorrect number of arguments: %d\n", argc); - shutItDown(); + if (argc != 2) { + printf("Incorrect number of arguments: %d\n", argc); + puts(usageErrorText); + sendErrorCodeAndExit(-1, ESTEP_JSPAWN_ARG_ERROR, 0, 0); } if (strcmp(argv[1], VERSION_STRING) != 0) { - fprintf(stdout, "Incorrect Java version: %s\n", argv[1]); - shutItDown(); + printf("Incorrect Java version: %s\n", argv[1]); + puts(usageErrorText); + sendErrorCodeAndExit(-1, ESTEP_JSPAWN_VERSION_ERROR, 0, 0); } - r = sscanf (argv[2], "%d:%d:%d", &fdinr, &fdinw, &fdout); - if (r == 3 && fcntl(fdinr, F_GETFD) != -1 && fcntl(fdinw, F_GETFD) != -1) { - fstat(fdinr, &buf); - if (!S_ISFIFO(buf.st_mode)) { - fprintf(stdout, "Incorrect input pipe\n"); - shutItDown(); - } - } else { - fprintf(stdout, "Incorrect FD array data: %s\n", argv[2]); - shutItDown(); - } +#ifdef DEBUG + /* Check expected file descriptors */ + checkFileDescriptorSetup(); +#endif - // Close the writing end of the pipe we use for reading from the parent. 
- // We have to do this before we start reading from the parent to avoid - // blocking in the case the parent exits before we finished reading from it. - close(fdinw); // Deliberately ignore errors (see https://lwn.net/Articles/576478/). - initChildStuff (fdinr, fdout, &c); - // Now set the file descriptor for the pipe's writing end to -1 - // for the case that somebody tries to close it again. - assert(c.childenv[1] == fdinw); - c.childenv[1] = -1; - // The file descriptor for reporting errors back to our parent we got on the command - // line should be the same like the one in the ChildStuff struct we've just read. - assert(c.fail[1] == fdout); + initChildStuff(CHILDENV_FILENO, FAIL_FILENO, &c); + +#ifdef DEBUG + /* Not needed in spawn mode */ + assert(c.in[0] == -1 && c.in[1] == -1 && + c.out[0] == -1 && c.out[1] == -1 && + c.err[0] == -1 && c.err[1] == -1 && + c.fail[0] == -1 && c.fail[1] == -1 && + c.fds[0] == -1 && c.fds[1] == -1 && c.fds[2] == -1); +#endif childProcess (&c); return 0; /* NOT REACHED */ diff --git a/src/java.base/unix/native/libjava/ProcessImpl_md.c b/src/java.base/unix/native/libjava/ProcessImpl_md.c index 12597fbb650..69af948d2da 100644 --- a/src/java.base/unix/native/libjava/ProcessImpl_md.c +++ b/src/java.base/unix/native/libjava/ProcessImpl_md.c @@ -43,10 +43,13 @@ #include #include #include - +#include +#include +#include #include #include "childproc.h" +#include "childproc_errorcodes.h" /* * @@ -57,51 +60,33 @@ * changing paths... * - then exec(2) the target binary * - * There are three ways to fork off: + * On the OS-side are three ways to fork off, but we only use two of them: * - * A) fork(2). Portable and safe (no side effects) but may fail with ENOMEM on - * all Unices when invoked from a VM with a high memory footprint. On Unices - * with strict no-overcommit policy this problem is most visible. + * A) fork(2). Portable and safe (no side effects) but could fail on very ancient + * Unices that don't employ COW on fork(2). 
The modern platforms we support + * (Linux, MacOS, AIX) all do. It may have a small performance penalty compared + * to modern posix_spawn(3) implementations - see below. + * fork(2) can be used by specifying -Djdk.lang.Process.launchMechanism=FORK when starting + * the (parent process) JVM. * - * This is because forking the VM will first create a child process with - * theoretically the same memory footprint as the parent - even if you plan - * to follow up with exec'ing a tiny binary. In reality techniques like - * copy-on-write etc mitigate the problem somewhat but we still run the risk - * of hitting system limits. + * B) vfork(2): Portable and fast but very unsafe. For details, see JDK-8357090. + * We supported this mode in older releases but removed support for it in JDK 27. + * Modern posix_spawn(3) implementations use techniques similar to vfork(2), but + * in a much safer way * - * For a Linux centric description of this problem, see the documentation on - * /proc/sys/vm/overcommit_memory in Linux proc(5). - * - * B) vfork(2): Portable and fast but very unsafe. It bypasses the memory - * problems related to fork(2) by starting the child in the memory image of - * the parent. Things that can go wrong include: - * - Programming errors in the child process before the exec(2) call may - * trash memory in the parent process, most commonly the stack of the - * thread invoking vfork. - * - Signals received by the child before the exec(2) call may be at best - * misdirected to the parent, at worst immediately kill child and parent. - * - * This is mitigated by very strict rules about what one is allowed to do in - * the child process between vfork(2) and exec(2), which is basically nothing. - * However, we always broke this rule by doing the pre-exec work between - * vfork(2) and exec(2). - * - * Also note that vfork(2) has been deprecated by the OpenGroup, presumably - * because of its many dangers. 
- * - * C) clone(2): This is a Linux specific call which gives the caller fine - * grained control about how exactly the process fork is executed. It is - * powerful, but Linux-specific. - * - * Aside from these three possibilities there is a forth option: posix_spawn(3). - * Where fork/vfork/clone all fork off the process and leave pre-exec work and - * calling exec(2) to the user, posix_spawn(3) offers the user fork+exec-like - * functionality in one package, similar to CreateProcess() on Windows. - * - * It is not a system call in itself, but usually a wrapper implemented within - * the libc in terms of one of (fork|vfork|clone)+exec - so whether or not it - * has advantages over calling the naked (fork|vfork|clone) functions depends - * on how posix_spawn(3) is implemented. + * C) posix_spawn(3): Where fork/vfork/clone all fork off the process and leave + * pre-exec work and calling exec(2) to the user, posix_spawn(3) offers the user + * fork+exec-like functionality in one package, similar to CreateProcess() on Windows. + * It is not a system call, but a wrapper implemented in user-space libc in terms + * of one of (fork|vfork|clone)+exec - so whether or not it has advantages over calling + * the naked (fork|vfork|clone) functions depends on how posix_spawn(3) is implemented. + * Modern posix_spawn(3) implementations, on Linux, use clone(2) with CLONE_VM | CLONE_VFORK, + * giving us the best ratio between performance and safety. + * Note however, that posix_spawn(3) can be buggy, depending on the libc implementation. + * E.g., on MacOS, it is still fully not POSIX-compliant. Therefore, we need to retain the + * FORK mode as a backup. + * Posix_spawn mode is used by default, but can be explicitly enabled using + * -Djdk.lang.Process.launchMechanism=POSIX_SPAWN when starting the (parent process) JVM. 
* * Note that when using posix_spawn(3), we exec twice: first a tiny binary called * the jspawnhelper, then in the jspawnhelper we do the pre-exec work and exec a @@ -114,58 +99,14 @@ * --- Linux-specific --- * * How does glibc implement posix_spawn? - * (see: sysdeps/posix/spawni.c for glibc < 2.24, - * sysdeps/unix/sysv/linux/spawni.c for glibc >= 2.24): * - * 1) Before glibc 2.4 (released 2006), posix_spawn(3) used just fork(2)/exec(2). - * This would be bad for the JDK since we would risk the known memory issues with - * fork(2). But since this only affects glibc variants which have long been - * phased out by modern distributions, this is irrelevant. + * Before glibc 2.4 (released 2006), posix_spawn(3) used just fork(2)/exec(2). From + * glibc 2.4 up to and including 2.23, it used either fork(2) or vfork(2). None of these + * versions still matter. * - * 2) Between glibc 2.4 and glibc 2.23, posix_spawn uses either fork(2) or - * vfork(2) depending on how exactly the user called posix_spawn(3): - * - * - * The child process is created using vfork(2) instead of fork(2) when - * either of the following is true: - * - * * the spawn-flags element of the attributes object pointed to by - * attrp contains the GNU-specific flag POSIX_SPAWN_USEVFORK; or - * - * * file_actions is NULL and the spawn-flags element of the attributes - * object pointed to by attrp does not contain - * POSIX_SPAWN_SETSIGMASK, POSIX_SPAWN_SETSIGDEF, - * POSIX_SPAWN_SETSCHEDPARAM, POSIX_SPAWN_SETSCHEDULER, - * POSIX_SPAWN_SETPGROUP, or POSIX_SPAWN_RESETIDS. - * - * - * Due to the way the JDK calls posix_spawn(3), it would therefore call vfork(2). - * So we would avoid the fork(2) memory problems. However, there still remains the - * risk associated with vfork(2). But it is smaller than were we to call vfork(2) - * directly since we use the jspawnhelper, moving all pre-exec work off to after - * the first exec, thereby reducing the vulnerable time window. 
- * - * 3) Since glibc >= 2.24, glibc uses clone+exec: - * - * new_pid = CLONE (__spawni_child, STACK (stack, stack_size), stack_size, - * CLONE_VM | CLONE_VFORK | SIGCHLD, &args); - * - * This is even better than (2): - * - * CLONE_VM means we run in the parent's memory image, as with (2) - * CLONE_VFORK means parent waits until we exec, as with (2) - * - * However, error possibilities are further reduced since: - * - posix_spawn(3) passes a separate stack for the child to run on, eliminating - * the danger of trashing the forking thread's stack in the parent process. - * - posix_spawn(3) takes care to temporarily block all incoming signals to the - * child process until the first exec(2) has been called, - * - * TL;DR - * Calling posix_spawn(3) for glibc - * (2) < 2.24 is not perfect but still better than using plain vfork(2), since - * the chance of an error happening is greatly reduced - * (3) >= 2.24 is the best option - portable, fast and as safe as possible. + * Since glibc >= 2.24, glibc uses clone+exec with CLONE_VM | CLONE_VFORK to emulate vfork + * performance but without the inherent dangers (we run inside the parent's memory image + * and stop the parent for as long as it takes the child process to exec). * * --- * @@ -177,7 +118,6 @@ * * * - * * Based on the above analysis, we are currently defaulting to posix_spawn() * on all Unices including Linux. 
*/ @@ -486,28 +426,6 @@ static int copystrings(char *buf, int offset, const char * const *arg) { __attribute_noinline__ #endif -/* vfork(2) is deprecated on Darwin */ -#ifndef __APPLE__ -static pid_t -vforkChild(ChildStuff *c) { - volatile pid_t resultPid; - - /* - * We separate the call to vfork into a separate function to make - * very sure to keep stack of child from corrupting stack of parent, - * as suggested by the scary gcc warning: - * warning: variable 'foo' might be clobbered by 'longjmp' or 'vfork' - */ - resultPid = vfork(); - - if (resultPid == 0) { - childProcess(c); - } - assert(resultPid != 0); /* childProcess never returns */ - return resultPid; -} -#endif - static pid_t forkChild(ChildStuff *c) { pid_t resultPid; @@ -527,27 +445,46 @@ forkChild(ChildStuff *c) { return resultPid; } +/* Given two fds, one of which has to be -1, the other one has to be valid, + * return the valid one. */ +static int eitherOneOf(int fd1, int fd2) { + if (fd2 == -1) { + assert(fdIsValid(fd1)); + return fd1; + } + assert(fd1 == -1); + assert(fdIsValid(fd2)); + return fd2; +} + +static int call_posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *file_actions, int filedes, int newfiledes) { +#ifdef __APPLE__ + /* MacOS is not POSIX-compliant: dup2 file actions specifying the same fd as source and destination + * should be handled as no-op according to spec, but they cause EBADF. 
*/ + if (filedes == newfiledes) { + return 0; + } +#endif + return posix_spawn_file_actions_adddup2(file_actions, filedes, newfiledes); +} + static pid_t spawnChild(JNIEnv *env, jobject process, ChildStuff *c, const char *helperpath) { pid_t resultPid; - int i, offset, rval, bufsize, magic; - char *buf, buf1[(3 * 11) + 3]; // "%d:%d:%d\0" - char *hlpargs[4]; + int offset, rval, bufsize, magic; + char* buf; + char* hlpargs[3]; SpawnInfo sp; + posix_spawn_file_actions_t file_actions; + int child_stdin, child_stdout, child_stderr, child_childenv, child_fail = -1; - /* need to tell helper which fd is for receiving the childstuff - * and which fd to send response back on - */ - snprintf(buf1, sizeof(buf1), "%d:%d:%d", c->childenv[0], c->childenv[1], c->fail[1]); /* NULL-terminated argv array. * argv[0] contains path to jspawnhelper, to follow conventions. * argv[1] contains the version string as argument to jspawnhelper - * argv[2] contains the fd string as argument to jspawnhelper */ hlpargs[0] = (char*)helperpath; hlpargs[1] = VERSION_STRING; - hlpargs[2] = buf1; - hlpargs[3] = NULL; + hlpargs[2] = NULL; /* Following items are sent down the pipe to the helper * after it is spawned. @@ -570,19 +507,79 @@ spawnChild(JNIEnv *env, jobject process, ChildStuff *c, const char *helperpath) bufsize += sp.dirlen; arraysize(parentPathv, &sp.nparentPathv, &sp.parentPathvBytes); bufsize += sp.parentPathvBytes; - /* We need to clear FD_CLOEXEC if set in the fds[]. - * Files are created FD_CLOEXEC in Java. - * Otherwise, they will be closed when the target gets exec'd */ - for (i=0; i<3; i++) { - if (c->fds[i] != -1) { - int flags = fcntl(c->fds[i], F_GETFD); - if (flags & FD_CLOEXEC) { - fcntl(c->fds[i], F_SETFD, flags & (~FD_CLOEXEC)); - } - } + + /* Prepare file descriptors for jspawnhelper and the target binary. 
*/ + + /* 0: copy of either "in" pipe read fd or the stdin redirect fd */ + child_stdin = eitherOneOf(c->fds[0], c->in[0]); + + /* 1: copy of either "out" pipe write fd or the stdout redirect fd */ + child_stdout = eitherOneOf(c->fds[1], c->out[1]); + + /* 2: redirectErrorStream=1: redirected to child's stdout (Order matters!) + * redirectErrorStream=0: copy of either "err" pipe write fd or stderr redirect fd. */ + if (c->redirectErrorStream) { + child_stderr = STDOUT_FILENO; /* Note: this refers to the future stdout in the child process */ + } else { + child_stderr = eitherOneOf(c->fds[2], c->err[1]); } - rval = posix_spawn(&resultPid, helperpath, 0, 0, (char * const *) hlpargs, environ); + /* 3: copy of the "fail" pipe write fd */ + child_fail = c->fail[1]; + + /* 4: copy of the "childenv" pipe read end */ + child_childenv = c->childenv[0]; + + assert(fdIsValid(child_stdin)); + assert(fdIsValid(child_stdout)); + assert(fdIsValid(child_stderr)); + assert(fdIsPipe(child_fail)); + assert(fdIsPipe(child_childenv)); + /* This must always hold true, unless someone deliberately closed 0, 1, or 2 in the parent JVM. */ + assert(child_fail > STDERR_FILENO); + assert(child_childenv > STDERR_FILENO); + + /* Slot in dup2 file actions. */ + posix_spawn_file_actions_init(&file_actions); + +#ifdef __APPLE__ + /* On MacOS, posix_spawn does not behave in a POSIX-conform way in that the + * kernel closes CLOEXEC file descriptors too early for dup2 file actions to + * copy them after the fork. We have to explicitly prevent that by calling a + * propietary API. */ + posix_spawn_file_actions_addinherit_np(&file_actions, child_stdin); + posix_spawn_file_actions_addinherit_np(&file_actions, child_stdout); + posix_spawn_file_actions_addinherit_np(&file_actions, child_stderr); + posix_spawn_file_actions_addinherit_np(&file_actions, child_fail); + posix_spawn_file_actions_addinherit_np(&file_actions, child_childenv); +#endif + + /* First dup2 stdin/out/err to 0,1,2. 
After this, we can safely dup2 over the + * original stdin/out/err. */ + if (call_posix_spawn_file_actions_adddup2(&file_actions, child_stdin, STDIN_FILENO) != 0 || + call_posix_spawn_file_actions_adddup2(&file_actions, child_stdout, STDOUT_FILENO) != 0 || + /* Order matters: stderr may be redirected to stdout, so this dup2 must happen after the stdout one. */ + call_posix_spawn_file_actions_adddup2(&file_actions, child_stderr, STDERR_FILENO) != 0) + { + return -1; + } + + /* We dup2 with one intermediary step to prevent accidentally dup2'ing over child_childenv. */ + const int tmp_child_childenv = child_fail < 10 ? 10 : child_fail - 1; + if (call_posix_spawn_file_actions_adddup2(&file_actions, child_childenv, tmp_child_childenv) != 0 || + call_posix_spawn_file_actions_adddup2(&file_actions, child_fail, FAIL_FILENO) != 0 || + call_posix_spawn_file_actions_adddup2(&file_actions, tmp_child_childenv, CHILDENV_FILENO) != 0) + { + return -1; + } + + /* Since we won't use these in jspawnhelper, reset them all */ + c->in[0] = c->in[1] = c->out[0] = c->out[1] = + c->err[0] = c->err[1] = c->fail[0] = c->fail[1] = + c->fds[0] = c->fds[1] = c->fds[2] = -1; + c->redirectErrorStream = false; + + rval = posix_spawn(&resultPid, helperpath, &file_actions, 0, (char * const *) hlpargs, environ); if (rval != 0) { return -1; @@ -652,11 +649,6 @@ spawnChild(JNIEnv *env, jobject process, ChildStuff *c, const char *helperpath) static pid_t startChild(JNIEnv *env, jobject process, ChildStuff *c, const char *helperpath) { switch (c->mode) { -/* vfork(2) is deprecated on Darwin*/ - #ifndef __APPLE__ - case MODE_VFORK: - return vforkChild(c); - #endif case MODE_FORK: return forkChild(c); case MODE_POSIX_SPAWN: @@ -666,6 +658,30 @@ startChild(JNIEnv *env, jobject process, ChildStuff *c, const char *helperpath) } } +static int pipeSafely(int fd[2]) { + /* Pipe filedescriptors must be CLOEXEC as early as possible - ideally from the point of + * creation on - since at any moment a concurrent 
(third-party) fork() could inherit copies + * of these descriptors and accidentally keep the pipes open. That could cause the parent + * process to hang (see e.g. JDK-8377907). + * We use pipe2(2), if we have it. If we don't, we use pipe(2) + fcntl(2) immediately. + * The latter is still racy and can therefore still cause hangs as described in JDK-8377907, + * but at least the dangerous time window is as short as we can make it. + */ + int rc = -1; +#ifdef HAVE_PIPE2 + rc = pipe2(fd, O_CLOEXEC); +#else + rc = pipe(fd); + if (rc == 0) { + fcntl(fd[0], F_SETFD, FD_CLOEXEC); + fcntl(fd[1], F_SETFD, FD_CLOEXEC); + } +#endif /* HAVE_PIPE2 */ + assert(fdIsCloexec(fd[0])); + assert(fdIsCloexec(fd[1])); + return rc; +} + JNIEXPORT jint JNICALL Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, jobject process, @@ -678,7 +694,6 @@ Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, jintArray std_fds, jboolean redirectErrorStream) { - int errnum; int resultPid = -1; int in[2], out[2], err[2], fail[2], childenv[2]; jint *fds = NULL; @@ -727,11 +742,11 @@ Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, fds = (*env)->GetIntArrayElements(env, std_fds, NULL); if (fds == NULL) goto Catch; - if ((fds[0] == -1 && pipe(in) < 0) || - (fds[1] == -1 && pipe(out) < 0) || - (fds[2] == -1 && !redirectErrorStream && pipe(err) < 0) || // if not redirecting create the pipe - (pipe(childenv) < 0) || - (pipe(fail) < 0)) { + if ((fds[0] == -1 && pipeSafely(in) < 0) || + (fds[1] == -1 && pipeSafely(out) < 0) || + (fds[2] == -1 && !redirectErrorStream && pipeSafely(err) < 0) || + (pipeSafely(childenv) < 0) || + (pipeSafely(fail) < 0)) { throwInternalIOException(env, errno, "Bad file descriptor", mode); goto Catch; } @@ -767,9 +782,6 @@ Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, if (resultPid < 0) { char * failMessage = "unknown"; switch (c->mode) { - case MODE_VFORK: - failMessage = "vfork failed"; - break; case MODE_FORK: failMessage = "fork failed"; break; @@ -782,9 +794,11 @@ 
Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, } close(fail[1]); fail[1] = -1; /* See: WhyCantJohnnyExec (childproc.c) */ + errcode_t errcode; + /* If we expect the child to ping aliveness, wait for it. */ if (c->sendAlivePing) { - switch(readFully(fail[0], &errnum, sizeof(errnum))) { + switch(readFully(fail[0], &errcode, sizeof(errcode))) { case 0: /* First exec failed; */ { int tmpStatus = 0; @@ -792,13 +806,15 @@ Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, throwExitCause(env, p, tmpStatus, c->mode); goto Catch; } - case sizeof(errnum): - if (errnum != CHILD_IS_ALIVE) { - /* This can happen if the spawn helper encounters an error - * before or during the handshake with the parent. */ - throwInternalIOException(env, 0, - "Bad code from spawn helper (Failed to exec spawn helper)", - c->mode); + case sizeof(errcode): + if (errcode.step != ESTEP_CHILD_ALIVE) { + /* This can happen if the child process encounters an error + * before or during initial handshake with the parent. */ + char msg[256]; + snprintf(msg, sizeof(msg), + "Bad early code from spawn helper " ERRCODE_FORMAT " (Failed to exec spawn helper)", + ERRCODE_FORMAT_ARGS(errcode)); + throwInternalIOException(env, 0, msg, c->mode); goto Catch; } break; @@ -808,11 +824,29 @@ Java_java_lang_ProcessImpl_forkAndExec(JNIEnv *env, } } - switch (readFully(fail[0], &errnum, sizeof(errnum))) { + switch (readFully(fail[0], &errcode, sizeof(errcode))) { case 0: break; /* Exec succeeded */ - case sizeof(errnum): + case sizeof(errcode): + /* Always reap first! */ waitpid(resultPid, NULL, 0); - throwIOException(env, errnum, "Exec failed"); + /* Most of these errors are implementation errors and should result in an internal IOE, but + * a few can be caused by bad user input and need to be communicated to the end user. 
*/ + switch(errcode.step) { + case ESTEP_CHDIR_FAIL: + throwIOException(env, errcode.errno_, "Failed to access working directory"); + break; + case ESTEP_EXEC_FAIL: + throwIOException(env, errcode.errno_, "Exec failed"); + break; + default: { + /* Probably implementation error */ + char msg[256]; + snprintf(msg, sizeof(msg), + "Bad code from spawn helper " ERRCODE_FORMAT " (Failed to exec spawn helper)", + ERRCODE_FORMAT_ARGS(errcode)); + throwInternalIOException(env, 0, msg, c->mode); + } + }; goto Catch; default: throwInternalIOException(env, errno, "Read failed", c->mode); diff --git a/src/java.base/unix/native/libjava/childproc.c b/src/java.base/unix/native/libjava/childproc.c index 9c6334e52d2..6bc15dfb40c 100644 --- a/src/java.base/unix/native/libjava/childproc.c +++ b/src/java.base/unix/native/libjava/childproc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,27 +23,56 @@ * questions. 
*/ +#include #include #include #include #include +#include #include #include #include +#include #include #include #include "childproc.h" +#include "childproc_errorcodes.h" #include "jni_util.h" const char * const *parentPathv; +#ifdef DEBUG +bool fdIsValid(int fd) { + return fcntl(fd, F_GETFD) != -1; +} +bool fdIsPipe(int fd) { + struct stat buf; + errno = 0; + return fstat(fd, &buf) != -1 && S_ISFIFO(buf.st_mode); +} +bool fdIsCloexec(int fd) { + errno = 0; + const int flags = fcntl(fd, F_GETFD); + return flags != -1 && (flags & FD_CLOEXEC); +} +#endif // DEBUG + static int -restartableDup2(int fd_from, int fd_to) +restartableDup2(int fd_from, int fd_to, errcode_t* errcode) +/* All functions taking an errcode_t* as output behave the same: upon error, they populate + * errcode_t::hint and errcode_t::errno, but leave errcode_t::step as ESTEP_UNKNOWN since + * this information will be provided by the outer caller */ { int err; RESTARTABLE(dup2(fd_from, fd_to), err); - return err; + if (err == -1) { + /* use fd_to (the destination descriptor) as hint: it is a bit more telling + * than fd_from in our case */ + buildErrorCode(errcode, ESTEP_UNKNOWN, fd_to, errno); + return false; + } + return true; } int @@ -52,6 +81,16 @@ closeSafely(int fd) return (fd == -1) ? 
0 : close(fd); } +/* Like closeSafely, but sets errcode (hint = fd, errno) on error and returns false */ +static bool +closeSafely2(int fd, errcode_t* errcode) { + if (closeSafely(fd) == -1) { + buildErrorCode(errcode, ESTEP_UNKNOWN, fd, errno); + return false; + } + return true; +} + int markCloseOnExec(int fd) { @@ -128,15 +167,19 @@ markDescriptorsCloseOnExec(void) return 0; } -static int -moveDescriptor(int fd_from, int fd_to) +static bool +moveDescriptor(int fd_from, int fd_to, errcode_t* errcode) { if (fd_from != fd_to) { - if ((restartableDup2(fd_from, fd_to) == -1) || - (close(fd_from) == -1)) - return -1; + if (!restartableDup2(fd_from, fd_to, errcode)) { + return false; + } + if (close(fd_from) == -1) { + buildErrorCode(errcode, ESTEP_UNKNOWN, fd_from, errno); + return false; + } } - return 0; + return true; } int @@ -228,31 +271,6 @@ initVectorFromBlock(const char**vector, const char* block, int count) vector[count] = NULL; } -/** - * Exec FILE as a traditional Bourne shell script (i.e. one without #!). - * If we could do it over again, we would probably not support such an ancient - * misfeature, but compatibility wins over sanity. The original support for - * this was imported accidentally from execvp(). - */ -static void -execve_as_traditional_shell_script(const char *file, - const char *argv[], - const char *const envp[]) -{ - /* Use the extra word of space provided for us in argv by caller. */ - const char *argv0 = argv[0]; - const char *const *end = argv; - while (*end != NULL) - ++end; - memmove(argv+2, argv+1, (end-argv) * sizeof(*end)); - argv[0] = "/bin/sh"; - argv[1] = file; - execve(argv[0], (char **) argv, (char **) envp); - /* Can't even exec /bin/sh? Big trouble, but let's soldier on... */ - memmove(argv+1, argv+2, (end-argv) * sizeof(*end)); - argv[0] = argv0; -} - /** * Like execve(2), except that in case of ENOEXEC, FILE is assumed to * be a shell script and the system default shell is invoked to run it. 
@@ -262,16 +280,9 @@ execve_with_shell_fallback(int mode, const char *file, const char *argv[], const char *const envp[]) { - if (mode == MODE_VFORK) { - /* shared address space; be very careful. */ - execve(file, (char **) argv, (char **) envp); - if (errno == ENOEXEC) - execve_as_traditional_shell_script(file, argv, envp); - } else { - /* unshared address space; we can mutate environ. */ - environ = (char **) envp; - execvp(file, (char **) argv); - } + /* unshared address space; we can mutate environ. */ + environ = (char **) envp; + execvp(file, (char **) argv); } /** @@ -367,54 +378,81 @@ int childProcess(void *arg) { const ChildStuff* p = (const ChildStuff*) arg; - int fail_pipe_fd = p->fail[1]; - if (p->sendAlivePing) { - /* Child shall signal aliveness to parent at the very first - * moment. */ - int code = CHILD_IS_ALIVE; - if (writeFully(fail_pipe_fd, &code, sizeof(code)) != sizeof(code)) { - goto WhyCantJohnnyExec; - } + int fail_pipe_fd = (p->mode == MODE_POSIX_SPAWN) ? + FAIL_FILENO : /* file descriptors already set up by posix_spawn(). */ + p->fail[1]; + + /* error information for WhyCantJohnnyExec */ + errcode_t errcode; + + /* Child shall signal aliveness to parent at the very first + * moment. */ + if (p->sendAlivePing && !sendAlivePing(fail_pipe_fd)) { + buildErrorCode(&errcode, ESTEP_SENDALIVE_FAIL, fail_pipe_fd, errno); + goto WhyCantJohnnyExec; } #ifdef DEBUG jtregSimulateCrash(0, 6); #endif - /* Close the parent sides of the pipes. - Closing pipe fds here is redundant, since markDescriptorsCloseOnExec() - would do it anyways, but a little paranoia is a good thing. */ - if ((closeSafely(p->in[1]) == -1) || - (closeSafely(p->out[0]) == -1) || - (closeSafely(p->err[0]) == -1) || - (closeSafely(p->childenv[0]) == -1) || - (closeSafely(p->childenv[1]) == -1) || - (closeSafely(p->fail[0]) == -1)) - goto WhyCantJohnnyExec; - /* Give the child sides of the pipes the right fileno's. 
*/ - /* Note: it is possible for in[0] == 0 */ - if ((moveDescriptor(p->in[0] != -1 ? p->in[0] : p->fds[0], - STDIN_FILENO) == -1) || - (moveDescriptor(p->out[1]!= -1 ? p->out[1] : p->fds[1], - STDOUT_FILENO) == -1)) - goto WhyCantJohnnyExec; + /* File descriptor setup for non-Posix-spawn mode */ + if (p->mode == MODE_FORK) { - if (p->redirectErrorStream) { - if ((closeSafely(p->err[1]) == -1) || - (restartableDup2(STDOUT_FILENO, STDERR_FILENO) == -1)) + /* Close the parent sides of the pipes. + Closing pipe fds here is redundant, since markDescriptorsCloseOnExec() + would do it anyways, but a little paranoia is a good thing. */ + if (!closeSafely2(p->in[1], &errcode) || + !closeSafely2(p->out[0], &errcode) || + !closeSafely2(p->err[0], &errcode) || + !closeSafely2(p->childenv[0], &errcode) || + !closeSafely2(p->childenv[1], &errcode) || + !closeSafely2(p->fail[0], &errcode)) + { + errcode.step = ESTEP_PIPECLOSE_FAIL; goto WhyCantJohnnyExec; - } else { - if (moveDescriptor(p->err[1] != -1 ? p->err[1] : p->fds[2], - STDERR_FILENO) == -1) + } + + /* Give the child sides of the pipes the right fileno's. */ + /* Note: it is possible for in[0] == 0 */ + if (!moveDescriptor(p->in[0] != -1 ? p->in[0] : p->fds[0], + STDIN_FILENO, &errcode)) { + errcode.step = ESTEP_DUP2_STDIN_FAIL; goto WhyCantJohnnyExec; - } + } - if (moveDescriptor(fail_pipe_fd, FAIL_FILENO) == -1) - goto WhyCantJohnnyExec; + if (!moveDescriptor(p->out[1] != -1 ? p->out[1] : p->fds[1], + STDOUT_FILENO, &errcode)) { + errcode.step = ESTEP_DUP2_STDOUT_FAIL; + goto WhyCantJohnnyExec; + } - /* We moved the fail pipe fd */ - fail_pipe_fd = FAIL_FILENO; + if (p->redirectErrorStream) { + if (!closeSafely2(p->err[1], &errcode) || + !restartableDup2(STDOUT_FILENO, STDERR_FILENO, &errcode)) { + errcode.step = ESTEP_DUP2_STDERR_REDIRECT_FAIL; + goto WhyCantJohnnyExec; + } + } else { + if (!moveDescriptor(p->err[1] != -1 ? 
p->err[1] : p->fds[2], + STDERR_FILENO, &errcode)) { + errcode.step = ESTEP_DUP2_STDERR_REDIRECT_FAIL; + goto WhyCantJohnnyExec; + } + } + + if (!moveDescriptor(fail_pipe_fd, FAIL_FILENO, &errcode)) { + errcode.step = ESTEP_DUP2_FAILPIPE_FAIL; + goto WhyCantJohnnyExec; + } + + /* We moved the fail pipe fd */ + fail_pipe_fd = FAIL_FILENO; + + } /* end: FORK mode */ + + assert(fail_pipe_fd == FAIL_FILENO); /* For AIX: The code in markDescriptorsCloseOnExec() relies on the current * semantic of this function. When this point here is reached only the @@ -424,46 +462,53 @@ childProcess(void *arg) if (markDescriptorsCloseOnExec() == -1) { /* failed, close the old way */ int max_fd = (int)sysconf(_SC_OPEN_MAX); int fd; - for (fd = STDERR_FILENO + 1; fd < max_fd; fd++) - if (markCloseOnExec(fd) == -1 && errno != EBADF) + for (fd = STDERR_FILENO + 1; fd < max_fd; fd++) { + if (markCloseOnExec(fd) == -1 && errno != EBADF) { + buildErrorCode(&errcode, ESTEP_CLOEXEC_FAIL, fd, errno); goto WhyCantJohnnyExec; + } + } } /* change to the new working directory */ - if (p->pdir != NULL && chdir(p->pdir) < 0) + if (p->pdir != NULL && chdir(p->pdir) < 0) { + buildErrorCode(&errcode, ESTEP_CHDIR_FAIL, 0, errno); goto WhyCantJohnnyExec; - - // Reset any mask signals from parent, but not in VFORK mode - if (p->mode != MODE_VFORK) { - sigset_t unblock_signals; - sigemptyset(&unblock_signals); - sigprocmask(SIG_SETMASK, &unblock_signals, NULL); } + // Reset any mask signals from parent + sigset_t unblock_signals; + sigemptyset(&unblock_signals); + sigprocmask(SIG_SETMASK, &unblock_signals, NULL); + // Children should be started with default signal disposition for SIGPIPE if (signal(SIGPIPE, SIG_DFL) == SIG_ERR) { + buildErrorCode(&errcode, ESTEP_SET_SIGPIPE, 0, errno); goto WhyCantJohnnyExec; } JDK_execvpe(p->mode, p->argv[0], p->argv, p->envv); + /* Still here. Hmm. 
*/ + buildErrorCode(&errcode, ESTEP_EXEC_FAIL, 0, errno); + WhyCantJohnnyExec: /* We used to go to an awful lot of trouble to predict whether the * child would fail, but there is no reliable way to predict the * success of an operation without *trying* it, and there's no way * to try a chdir or exec in the parent. Instead, all we need is a * way to communicate any failure back to the parent. Easy; we just - * send the errno back to the parent over a pipe in case of failure. + * send the errorcode back to the parent over a pipe in case of failure. * The tricky thing is, how do we communicate the *success* of exec? * We use FD_CLOEXEC together with the fact that a read() on a pipe * yields EOF when the write ends (we have two of them!) are closed. */ - { - int errnum = errno; - writeFully(fail_pipe_fd, &errnum, sizeof(errnum)); + if (!sendErrorCode(fail_pipe_fd, errcode)) { + printf("childproc fail: " ERRCODE_FORMAT "\n", ERRCODE_FORMAT_ARGS(errcode)); } + int exitcode = exitCodeFromErrorCode(errcode); close(fail_pipe_fd); - _exit(-1); + _exit(exitcode); return 0; /* Suppress warning "no return value from function" */ } diff --git a/src/java.base/unix/native/libjava/childproc.h b/src/java.base/unix/native/libjava/childproc.h index 974fac3bddd..0b02df7f3dd 100644 --- a/src/java.base/unix/native/libjava/childproc.h +++ b/src/java.base/unix/native/libjava/childproc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #define CHILDPROC_MD_H #include +#include #ifdef __APPLE__ #include @@ -72,12 +73,14 @@ extern char **environ; #define FAIL_FILENO (STDERR_FILENO + 1) +/* For POSIX_SPAWN mode */ +#define CHILDENV_FILENO (FAIL_FILENO + 1) + /* These numbers must be the same as the Enum in ProcessImpl.java * Must be a better way of doing this. */ #define MODE_FORK 1 #define MODE_POSIX_SPAWN 2 -#define MODE_VFORK 3 typedef struct _ChildStuff { @@ -107,13 +110,6 @@ typedef struct _SpawnInfo { int parentPathvBytes; /* total number of bytes in parentPathv array */ } SpawnInfo; -/* If ChildStuff.sendAlivePing is true, child shall signal aliveness to - * the parent the moment it gains consciousness, before any subsequent - * pre-exec errors could happen. - * This code must fit into an int and not be a valid errno value on any of - * our platforms. */ -#define CHILD_IS_ALIVE 65535 - /** * The cached and split version of the JDK's effective PATH. * (We don't support putenv("PATH=...") in native code) @@ -136,6 +132,17 @@ int childProcess(void *arg); * See: test/jdk/java/lang/ProcessBuilder/JspawnhelperProtocol.java */ void jtregSimulateCrash(pid_t child, int stage); +/* Helper functions to check the state of fds */ +bool fdIsValid(int fd); +bool fdIsPipe(int fd); +bool fdIsCloexec(int fd); #endif +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) +#define HAVE_PIPE2 +#else +// Neither MacOS nor AIX support pipe2, unfortunately +#undef HAVE_PIPE2 #endif + +#endif /* CHILDPROC_MD_H */ diff --git a/src/java.base/unix/native/libjava/childproc_errorcodes.c b/src/java.base/unix/native/libjava/childproc_errorcodes.c new file mode 100644 index 00000000000..4dc8e927616 --- /dev/null +++ b/src/java.base/unix/native/libjava/childproc_errorcodes.c @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include +#include + +#include +#include "childproc.h" +#include "childproc_errorcodes.h" + +void buildErrorCode(errcode_t* errcode, int step, int hint, int errno_) { + errcode_t e; + + assert(step < (1 << 8)); + e.step = step; + + assert(errno_ < (1 << 8)); + e.errno_ = errno_; + + const int maxhint = (1 << 16); + e.hint = hint < maxhint ? hint : maxhint; + + (*errcode) = e; +} + +int exitCodeFromErrorCode(errcode_t errcode) { + /* We use the fail step number as exit code, but avoid 0 and 1 + * and try to avoid the [128..256) range since that one is used by + * shells to codify abnormal kills by signal. 
*/ + return 0x10 + errcode.step; +} + +bool sendErrorCode(int fd, errcode_t errcode) { + return writeFully(fd, &errcode, sizeof(errcode)) == sizeof(errcode); +} + +bool sendAlivePing(int fd) { + errcode_t errcode; + buildErrorCode(&errcode, ESTEP_CHILD_ALIVE, getpid(), 0); + return sendErrorCode(fd, errcode); +} diff --git a/src/java.base/unix/native/libjava/childproc_errorcodes.h b/src/java.base/unix/native/libjava/childproc_errorcodes.h new file mode 100644 index 00000000000..8379db4ad2b --- /dev/null +++ b/src/java.base/unix/native/libjava/childproc_errorcodes.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CHILDPROC_ERRORCODES_H +#define CHILDPROC_ERRORCODES_H + +#include +#include + +typedef struct errcode_t_ { + unsigned step : 8; + unsigned hint : 16; + unsigned errno_ : 8; +} errcode_t; + +/* Helper macros for printing an errcode_t */ +#define ERRCODE_FORMAT "(%u-%u-%u)" +#define ERRCODE_FORMAT_ARGS(errcode) errcode.step, errcode.hint, errcode.errno_ + + +/* Builds up an error code. + * Note: + * - hint will be capped at 2^16 + * - both step and errno_ must fit into 8 bits. */ +void buildErrorCode(errcode_t* errcode, int step, int hint, int errno_); + +/* Sends an error code down a pipe. Returns true if sent successfully. */ +bool sendErrorCode(int fd, errcode_t errcode); + +/* Build an exit code for an errcode (used as child process exit code + * in addition to the errcode being sent to parent). */ +int exitCodeFromErrorCode(errcode_t errcode); + +/* Sends alive ping down a pipe. Returns true if sent successfully. */ +bool sendAlivePing(int fd); + +#define ESTEP_UNKNOWN 0 + +/* not an error code, but an "I am alive" ping from the child. + * hint is child pid, errno is 0. */ +#define ESTEP_CHILD_ALIVE 255 + +/* JspawnHelper */ +#define ESTEP_JSPAWN_ARG_ERROR 1 +#define ESTEP_JSPAWN_VERSION_ERROR 2 + +/* Checking file descriptor setup + * hint is the (16-bit-capped) fd number */ +#define ESTEP_JSPAWN_INVALID_FD 3 +#define ESTEP_JSPAWN_NOT_A_PIPE 4 + +/* Allocation fail in jspawnhelper. + * hint is the (16-bit-capped) fail size */ +#define ESTEP_JSPAWN_ALLOC_FAILED 5 + +/* Receiving Childstuff from parent, communication error. + * hint is the substep. */ +#define ESTEP_JSPAWN_RCV_CHILDSTUFF_COMM_FAIL 6 + +/* Expand if needed ... */ + +/* childproc() */ + +/* Failed to send aliveness ping + * hint is the (16-bit-capped) fd. */ +#define ESTEP_SENDALIVE_FAIL 10 + +/* Failed to close a pipe in fork mode + * hint is the (16-bit-capped) fd. */ +#define ESTEP_PIPECLOSE_FAIL 11 + +/* Failed to dup2 a file descriptor in fork mode. 
+ * hint is the (16-bit-capped) fd_to (!) */ +#define ESTEP_DUP2_STDIN_FAIL 13 +#define ESTEP_DUP2_STDOUT_FAIL 14 +#define ESTEP_DUP2_STDERR_REDIRECT_FAIL 15 +#define ESTEP_DUP2_STDERR_FAIL 16 +#define ESTEP_DUP2_FAILPIPE_FAIL 17 + +/* Failed to mark a file descriptor as CLOEXEC + * hint is the (16-bit-capped) fd */ +#define ESTEP_CLOEXEC_FAIL 18 + +/* Failed to chdir into the target working directory */ +#define ESTEP_CHDIR_FAIL 19 + +/* Failed to change signal disposition for SIGPIPE to default */ +#define ESTEP_SET_SIGPIPE 20 + +/* Expand if needed ... */ + +/* All modes: exec() failed */ +#define ESTEP_EXEC_FAIL 30 + +#endif /* CHILDPROC_MD_H */ diff --git a/src/java.base/windows/native/libjava/java_props_md.c b/src/java.base/windows/native/libjava/java_props_md.c index e152dbe9bef..6504891af34 100644 --- a/src/java.base/windows/native/libjava/java_props_md.c +++ b/src/java.base/windows/native/libjava/java_props_md.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,43 +74,32 @@ getEncodingInternal(LCID lcid) case 65001: strcpy(ret, "UTF-8"); break; - case 874: /* 9:Thai */ - case 932: /* 10:Japanese */ - case 949: /* 12:Korean Extended Wansung */ - case 950: /* 13:Chinese (Taiwan, Hongkong, Macau) */ - case 1361: /* 15:Korean Johab */ + case 874: /* Thai */ + case 932: /* Japanese */ + case 936: /* Chinese (Simplified) */ + case 949: /* Korean Extended Wansung */ + case 950: /* Chinese (Taiwan, Hongkong, Macau) */ + case 1361: /* Korean Johab */ ret[0] = 'M'; ret[1] = 'S'; - break; - case 936: - strcpy(ret, "GBK"); - break; - case 54936: - strcpy(ret, "GB18030"); - break; - default: - ret[0] = 'C'; - ret[1] = 'p'; - break; - } - //Traditional Chinese Windows should use MS950_HKSCS_XP as the - //default encoding, if HKSCS patch has been installed. - // "old" MS950 0xfa41 -> u+e001 - // "new" MS950 0xfa41 -> u+92db - if (strcmp(ret, "MS950") == 0) { - TCHAR mbChar[2] = {(char)0xfa, (char)0x41}; - WCHAR unicodeChar; - MultiByteToWideChar(CP_ACP, 0, mbChar, 2, &unicodeChar, 1); - if (unicodeChar == 0x92db) { - strcpy(ret, "MS950_HKSCS_XP"); - } - } else { - //SimpChinese Windows should use GB18030 as the default - //encoding, if gb18030 patch has been installed (on windows - //2000/XP, (1)Codepage 54936 will be available - //(2)simsun18030.ttc will exist under system fonts dir ) - if (strcmp(ret, "GBK") == 0 && IsValidCodePage(54936)) { + // Special handling for Chinese + if (codepage == 950) { + //Traditional Chinese Windows should use MS950_HKSCS_XP as the + //default encoding, if HKSCS patch has been installed. 
+ // "old" MS950 0xfa41 -> u+e001 + // "new" MS950 0xfa41 -> u+92db + TCHAR mbChar[2] = {(char)0xfa, (char)0x41}; + WCHAR unicodeChar; + MultiByteToWideChar(CP_ACP, 0, mbChar, 2, &unicodeChar, 1); + if (unicodeChar == 0x92db) { + strcpy(ret, "MS950_HKSCS_XP"); + } + } else if (codepage == 936 && IsValidCodePage(54936)) { + //SimpChinese Windows should use GB18030 as the default + //encoding, if gb18030 patch has been installed (on windows + //2000/XP, (1)Codepage 54936 will be available + //(2)simsun18030.ttc will exist under system fonts dir ) char systemPath[MAX_PATH + 1]; char* gb18030Font = "\\FONTS\\SimSun18030.ttc"; FILE *f = NULL; @@ -123,6 +112,14 @@ getEncodingInternal(LCID lcid) } } } + break; + case 54936: + strcpy(ret, "GB18030"); + break; + default: + ret[0] = 'C'; + ret[1] = 'p'; + break; } return ret; diff --git a/src/java.compiler/share/classes/javax/annotation/processing/Processor.java b/src/java.compiler/share/classes/javax/annotation/processing/Processor.java index 4be159868fa..2f2a9b285f6 100644 --- a/src/java.compiler/share/classes/javax/annotation/processing/Processor.java +++ b/src/java.compiler/share/classes/javax/annotation/processing/Processor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,28 +101,29 @@ import javax.lang.model.SourceVersion; * supports, possibly an empty set. * * For a given round, the tool computes the set of annotation - * interfaces that are present on the elements enclosed within the - * root elements. If there is at least one annotation interface - * present, then as processors claim annotation interfaces, they are - * removed from the set of unmatched annotation interfaces. 
When the - * set is empty or no more processors are available, the round has run - * to completion. If there are no annotation interfaces present, - * annotation processing still occurs but only universal + * interfaces that are present on the elements {@linkplain + * RoundEnvironment#getElementsAnnotatedWith(TypeElement) included} + * within the root elements. If there is at least one annotation + * interface present, then as processors claim annotation interfaces, + * they are removed from the set of unmatched annotation interfaces. + * When the set is empty or no more processors are available, the + * round has run to completion. If there are no annotation interfaces + * present, annotation processing still occurs but only universal * processors which support processing all annotation interfaces, * {@code "*"}, can claim the (empty) set of annotation interfaces. * *

An annotation interface is considered present if there is at least - * one annotation of that interface present on an element enclosed within + * one annotation of that interface present on an element included within * the root elements of a round. For this purpose, a type parameter is - * considered to be enclosed by its {@linkplain + * considered to be included by its {@linkplain * TypeParameterElement#getGenericElement generic * element}. * For this purpose, a package element is not considered to - * enclose the top-level classes and interfaces within that + * include the top-level classes and interfaces within that * package. (A root element representing a package is created when a * {@code package-info} file is processed.) Likewise, for this - * purpose, a module element is not considered to enclose the + * purpose, a module element is not considered to include the * packages within that module. (A root element representing a module * is created when a {@code module-info} file is processed.) * diff --git a/src/java.compiler/share/classes/javax/lang/model/util/Types.java b/src/java.compiler/share/classes/javax/lang/model/util/Types.java index e7212a7e0be..f632135a899 100644 --- a/src/java.compiler/share/classes/javax/lang/model/util/Types.java +++ b/src/java.compiler/share/classes/javax/lang/model/util/Types.java @@ -74,6 +74,7 @@ public interface Types { * Types without corresponding elements include: *

    *
  • {@linkplain TypeKind#isPrimitive() primitive types} + *
  • {@linkplain TypeKind#ARRAY array types} *
  • {@linkplain TypeKind#EXECUTABLE executable types} *
  • {@linkplain TypeKind#NONE "none"} pseudo-types *
  • {@linkplain TypeKind#NULL null types} diff --git a/src/java.desktop/aix/native/libawt/porting_aix.c b/src/java.desktop/aix/native/libawt/porting_aix.c deleted file mode 100644 index b506ef5a44b..00000000000 --- a/src/java.desktop/aix/native/libawt/porting_aix.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2012, 2023 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include -#include -#include -#include - -#include "porting_aix.h" - -static unsigned char dladdr_buffer[0x8000]; - -static void fill_dll_info(void) { - int rc = loadquery(L_GETINFO,dladdr_buffer, sizeof(dladdr_buffer)); - if (rc == -1) { - fprintf(stderr, "loadquery failed (%d %s)", errno, strerror(errno)); - fflush(stderr); - } -} - -static int dladdr_dont_reload(void* addr, Dl_info* info) { - const struct ld_info* p = (struct ld_info*) dladdr_buffer; - memset((void *)info, 0, sizeof(Dl_info)); - for (;;) { - if (addr >= p->ldinfo_textorg && - (char*)addr < (char*)(p->ldinfo_textorg) + p->ldinfo_textsize) { - info->dli_fname = p->ldinfo_filename; - info->dli_fbase = p->ldinfo_textorg; - return 1; /* [sic] */ - } - if (!p->ldinfo_next) { - break; - } - p = (struct ld_info*)(((char*)p) + p->ldinfo_next); - } - return 0; /* [sic] */ -} - -#ifdef __cplusplus -extern "C" -#endif -int dladdr(void *addr, Dl_info *info) { - static int loaded = 0; - if (!loaded) { - fill_dll_info(); - loaded = 1; - } - if (!addr) { - return 0; /* [sic] */ - } - /* Address could be AIX function descriptor? 
*/ - void* const addr0 = *( (void**) addr ); - int rc = dladdr_dont_reload(addr, info); - if (rc == 0) { - rc = dladdr_dont_reload(addr0, info); - if (rc == 0) { /* [sic] */ - fill_dll_info(); /* refill, maybe loadquery info is outdated */ - rc = dladdr_dont_reload(addr, info); - if (rc == 0) { - rc = dladdr_dont_reload(addr0, info); - } - } - } - return rc; -} diff --git a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessibility.java b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessibility.java index 494995735e6..636d8240a55 100644 --- a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessibility.java +++ b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessibility.java @@ -116,7 +116,9 @@ final class CAccessibility implements PropertyChangeListener { if (newValue instanceof Accessible) { AccessibleContext nvAC = ((Accessible) newValue).getAccessibleContext(); AccessibleRole nvRole = nvAC.getAccessibleRole(); - if (!ignoredRoles.contains(roleKey(nvRole))) { + String roleStr = nvRole == null ? null : + AWTAccessor.getAccessibleBundleAccessor().getKey(nvRole); + if (!ignoredRoles.contains(roleStr)) { focusChanged(); } } @@ -1034,8 +1036,10 @@ final class CAccessibility implements PropertyChangeListener { // "ignored", and we should skip it and its descendants if (isShowing(context)) { final AccessibleRole role = context.getAccessibleRole(); - if (role != null && ignoredRoles != null && - ignoredRoles.contains(roleKey(role))) { + String roleStr = role == null ? null : + AWTAccessor.getAccessibleBundleAccessor().getKey(role); + if (roleStr != null && ignoredRoles != null && + ignoredRoles.contains(roleStr)) { // Get the child's unignored children. 
_addChildren(child, whichChildren, false, childrenAndRoles, ChildrenOperations.COMMON); @@ -1096,8 +1100,6 @@ final class CAccessibility implements PropertyChangeListener { return isShowing(parentContext); } - private static native String roleKey(AccessibleRole aRole); - public static Object[] getChildren(final Accessible a, final Component c) { if (a == null) return null; return invokeAndWait(new Callable() { diff --git a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessible.java b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessible.java index 5be7f70b981..4315abe6197 100644 --- a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessible.java +++ b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CAccessible.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -188,10 +188,6 @@ final class CAccessible extends CFRetainedResource implements Accessible { // Do send check box state changes to native side if (thisRole == AccessibleRole.CHECK_BOX) { - if (!Objects.equals(newValue, oldValue)) { - valueChanged(ptr); - } - // Notify native side to handle check box style menuitem if (parentRole == AccessibleRole.POPUP_MENU && newValue != null && ((AccessibleState)newValue) == AccessibleState.FOCUSED) { @@ -201,23 +197,12 @@ final class CAccessible extends CFRetainedResource implements Accessible { // Do send radio button state changes to native side if (thisRole == AccessibleRole.RADIO_BUTTON) { - if (newValue != null && !newValue.equals(oldValue)) { - valueChanged(ptr); - } - // Notify native side to handle radio button style menuitem if (parentRole == AccessibleRole.POPUP_MENU && newValue != null && ((AccessibleState)newValue) == AccessibleState.FOCUSED) { menuItemSelected(ptr); } } - 
- // Do send toggle button state changes to native side - if (thisRole == AccessibleRole.TOGGLE_BUTTON) { - if (!Objects.equals(newValue, oldValue)) { - valueChanged(ptr); - } - } } else if (name.equals(ACCESSIBLE_NAME_PROPERTY)) { //for now trigger only for JTabbedPane. if (e.getSource() instanceof JTabbedPane) { @@ -227,7 +212,10 @@ final class CAccessible extends CFRetainedResource implements Accessible { AccessibleRole thisRole = accessible.getAccessibleContext() .getAccessibleRole(); if (thisRole == AccessibleRole.SLIDER || - thisRole == AccessibleRole.PROGRESS_BAR) { + thisRole == AccessibleRole.PROGRESS_BAR || + thisRole == AccessibleRole.CHECK_BOX || + thisRole == AccessibleRole.RADIO_BUTTON || + thisRole == AccessibleRole.TOGGLE_BUTTON ) { valueChanged(ptr); } } diff --git a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CTextPipe.java b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CTextPipe.java index cf4a6e72136..ba5bb769ad5 100644 --- a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CTextPipe.java +++ b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CTextPipe.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ package sun.lwawt.macosx; - import java.awt.*; import java.awt.font.*; @@ -73,12 +72,17 @@ public class CTextPipe implements TextPipe { @Override public void drawString(final SunGraphics2D sg2d, final String s, final double x, final double y) { + + FontInfo info = sg2d.getFontInfo(); + double dx = x + info.originX; + double dy = y + info.originY; + final long nativeStrikePtr = getNativeStrikePtr(sg2d); if (OSXSurfaceData.IsSimpleColor(sg2d.paint) && nativeStrikePtr != 0) { final OSXSurfaceData surfaceData = (OSXSurfaceData)sg2d.getSurfaceData(); - surfaceData.drawString(this, sg2d, nativeStrikePtr, s, x, y); + surfaceData.drawString(this, sg2d, nativeStrikePtr, s, dx, dy); } else { - drawTextAsShape(sg2d, s, x, y); + drawTextAsShape(sg2d, s, dx, dy); } } @@ -153,6 +157,15 @@ public class CTextPipe implements TextPipe { final Font prevFont = sg2d.getFont(); sg2d.setFont(gV.getFont()); + int flags = gV.getLayoutFlags(); + boolean positionAdjustments = (flags & GlyphVector.FLAG_HAS_POSITION_ADJUSTMENTS) != 0; + if (positionAdjustments) { + // make sure GV positions are initialized, so they are available later in native code; this + // will already be the case if the user explicitly set the glyph positions, but not if the + // position adjustment flag was set because of a font translation transform or font tracking + gV.getGlyphPosition(0); + } + if (hasSlotData(gV)) { final int length = gV.getNumGlyphs(); float[] positions = gV.getGlyphPositions(0, length, null); @@ -177,12 +190,17 @@ public class CTextPipe implements TextPipe { @Override public void drawChars(final SunGraphics2D sg2d, final char[] data, final int offset, final int length, final int x, final int y) { + + FontInfo info = sg2d.getFontInfo(); + double dx = x + info.originX; + double dy = y + info.originY; + final long nativeStrikePtr = getNativeStrikePtr(sg2d); if (OSXSurfaceData.IsSimpleColor(sg2d.paint) && 
nativeStrikePtr != 0) { final OSXSurfaceData surfaceData = (OSXSurfaceData)sg2d.getSurfaceData(); - surfaceData.drawUnicodes(this, sg2d, nativeStrikePtr, data, offset, length, x, y); + surfaceData.drawUnicodes(this, sg2d, nativeStrikePtr, data, offset, length, (float) dx, (float) dy); } else { - drawTextAsShape(sg2d, new String(data, offset, length), x, y); + drawTextAsShape(sg2d, new String(data, offset, length), dx, dy); } } @@ -191,7 +209,8 @@ public class CTextPipe implements TextPipe { } public static final class Tracer extends CTextPipe { - void doDrawString(final SurfaceData sData, final long nativeStrikePtr, final String s, final float x, final float y) { + @Override + public void doDrawString(final SurfaceData sData, final long nativeStrikePtr, final String s, final double x, final double y) { GraphicsPrimitive.tracePrimitive("QuartzDrawString"); super.doDrawString(sData, nativeStrikePtr, s, x, y); } diff --git a/src/java.desktop/macosx/native/libawt_lwawt/awt/CDesktopPeer.m b/src/java.desktop/macosx/native/libawt_lwawt/awt/CDesktopPeer.m index 460749c363d..faacef5adea 100644 --- a/src/java.desktop/macosx/native/libawt_lwawt/awt/CDesktopPeer.m +++ b/src/java.desktop/macosx/native/libawt_lwawt/awt/CDesktopPeer.m @@ -70,6 +70,7 @@ JNI_COCOA_ENTER(env); dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(NSEC_PER_SEC)); // 1 second timeout // Asynchronous call to openURL + dispatch_retain(semaphore); [[NSWorkspace sharedWorkspace] openURLs:urls withApplicationAtURL:appURI configuration:configuration @@ -78,9 +79,11 @@ JNI_COCOA_ENTER(env); status = (OSStatus) error.code; } dispatch_semaphore_signal(semaphore); + dispatch_release(semaphore); }]; dispatch_semaphore_wait(semaphore, timeout); + dispatch_release(semaphore); JNI_COCOA_EXIT(env); return status; @@ -146,6 +149,7 @@ JNI_COCOA_ENTER(env); dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(NSEC_PER_SEC)); // 1 second timeout // Asynchronous call - 
openURLs:withApplicationAtURL + dispatch_retain(semaphore); [[NSWorkspace sharedWorkspace] openURLs:urls withApplicationAtURL:appURI configuration:configuration @@ -154,9 +158,11 @@ JNI_COCOA_ENTER(env); status = (OSStatus) error.code; } dispatch_semaphore_signal(semaphore); + dispatch_release(semaphore); }]; dispatch_semaphore_wait(semaphore, timeout); + dispatch_release(semaphore); [urlToOpen release]; JNI_COCOA_EXIT(env); diff --git a/src/java.desktop/macosx/native/libawt_lwawt/awt/JavaAccessibilityUtilities.m b/src/java.desktop/macosx/native/libawt_lwawt/awt/JavaAccessibilityUtilities.m index bf66df162d1..fcd330940cc 100644 --- a/src/java.desktop/macosx/native/libawt_lwawt/awt/JavaAccessibilityUtilities.m +++ b/src/java.desktop/macosx/native/libawt_lwawt/awt/JavaAccessibilityUtilities.m @@ -325,20 +325,6 @@ static BOOL JavaAccessibilityIsSupportedAttribute(id element, NSString *attribut return [[element accessibilityAttributeNames] indexOfObject:attribute] != NSNotFound; } -/* - * Class: sun_lwawt_macosx_CAccessibility - * Method: roleKey - * Signature: (Ljavax/accessibility/AccessibleRole;)Ljava/lang/String; - */ -JNIEXPORT jstring JNICALL Java_sun_lwawt_macosx_CAccessibility_roleKey -(JNIEnv *env, jclass clz, jobject axRole) -{ - DECLARE_CLASS_RETURN(sjc_AccessibleRole, "javax/accessibility/AccessibleRole", NULL); - DECLARE_FIELD_RETURN(sjf_key, sjc_AccessibleRole, "key", "Ljava/lang/String;", NULL); - return (*env)->GetObjectField(env, axRole, sjf_key); -} - - // errors from NSAccessibilityErrors void JavaAccessibilityRaiseSetAttributeToIllegalTypeException(const char *functionName, id element, NSString *attribute, id value) { diff --git a/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CheckboxAccessibility.m b/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CheckboxAccessibility.m index f2dbf60d92d..a5faf255440 100644 --- a/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CheckboxAccessibility.m +++ 
b/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CheckboxAccessibility.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,18 @@ return NSAccessibilityCheckBoxRole; } +- (NSAccessibilitySubrole _Nullable)accessibilitySubrole +{ + JNIEnv *env = [ThreadUtilities getJNIEnv]; + if (env != NULL) { + NSString *javaRole = [self javaRole]; + if ([javaRole isEqualToString:@"togglebutton"]) { + return NSAccessibilityToggleSubrole; + } + } + return [super accessibilitySubrole]; +} + - (id _Nonnull) accessibilityValue { AWT_ASSERT_APPKIT_THREAD; diff --git a/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CommonComponentAccessibility.m b/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CommonComponentAccessibility.m index 0f0a395c597..45e8f981f50 100644 --- a/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CommonComponentAccessibility.m +++ b/src/java.desktop/macosx/native/libawt_lwawt/awt/a11y/CommonComponentAccessibility.m @@ -136,6 +136,7 @@ static jobject sAccessibilityClass = NULL; [rolesMap setObject:@"StaticTextAccessibility" forKey:@"label"]; [rolesMap setObject:@"RadiobuttonAccessibility" forKey:@"radiobutton"]; [rolesMap setObject:@"CheckboxAccessibility" forKey:@"checkbox"]; + [rolesMap setObject:@"CheckboxAccessibility" forKey:@"togglebutton"]; [rolesMap setObject:@"SliderAccessibility" forKey:@"slider"]; [rolesMap setObject:@"ScrollAreaAccessibility" forKey:@"scrollpane"]; [rolesMap setObject:@"ScrollBarAccessibility" forKey:@"scrollbar"]; diff --git a/src/java.desktop/share/classes/com/sun/java/swing/plaf/motif/MotifLookAndFeel.java b/src/java.desktop/share/classes/com/sun/java/swing/plaf/motif/MotifLookAndFeel.java index d254443b8d1..5263d248f45 100644 --- 
a/src/java.desktop/share/classes/com/sun/java/swing/plaf/motif/MotifLookAndFeel.java +++ b/src/java.desktop/share/classes/com/sun/java/swing/plaf/motif/MotifLookAndFeel.java @@ -537,6 +537,10 @@ public class MotifLookAndFeel extends BasicLookAndFeel @SuppressWarnings("deprecation") final int metaMask = KeyEvent.META_MASK; + Object commonInputMap = new UIDefaults.LazyInputMap(new Object[] { + "SPACE", "pressed", + "released SPACE", "released" + }); Object[] defaults = { "Desktop.background", table.get("desktop"), @@ -593,20 +597,13 @@ public class MotifLookAndFeel extends BasicLookAndFeel "Button.foreground", table.get("controlText"), "Button.select", table.get("controlLightShadow"), "Button.font", dialogPlain12, - "Button.focusInputMap", new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "Button.focusInputMap", commonInputMap, "CheckBox.textIconGap", 8, "CheckBox.margin", new InsetsUIResource(4, 2, 4, 2), "CheckBox.icon", checkBoxIcon, "CheckBox.focus", table.get("activeCaptionBorder"), - "CheckBox.focusInputMap", - new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "CheckBox.focusInputMap", commonInputMap, "RadioButton.margin", new InsetsUIResource(4, 2, 4, 2), "RadioButton.textIconGap", 8, @@ -615,22 +612,14 @@ public class MotifLookAndFeel extends BasicLookAndFeel "RadioButton.icon", radioButtonIcon, "RadioButton.focus", table.get("activeCaptionBorder"), "RadioButton.icon", radioButtonIcon, - "RadioButton.focusInputMap", - new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "RadioButton.focusInputMap", commonInputMap, "ToggleButton.border", toggleButtonBorder, "ToggleButton.background", table.get("control"), "ToggleButton.foreground", table.get("controlText"), "ToggleButton.focus", table.get("controlText"), "ToggleButton.select", table.get("controlLightShadow"), - "ToggleButton.focusInputMap", - new 
UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "ToggleButton.focusInputMap", commonInputMap, // Menus "Menu.border", menuMarginBorder, diff --git a/src/java.desktop/share/classes/java/awt/AWTEvent.java b/src/java.desktop/share/classes/java/awt/AWTEvent.java index d48fae68cbe..f365393ffb1 100644 --- a/src/java.desktop/share/classes/java/awt/AWTEvent.java +++ b/src/java.desktop/share/classes/java/awt/AWTEvent.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -554,22 +554,9 @@ public abstract class AWTEvent extends EventObject { */ void copyPrivateDataInto(AWTEvent that) { that.bdata = this.bdata; - // Copy canAccessSystemClipboard value from this into that. - if (this instanceof InputEvent && that instanceof InputEvent) { - - AWTAccessor.InputEventAccessor accessor - = AWTAccessor.getInputEventAccessor(); - - boolean b = accessor.canAccessSystemClipboard((InputEvent) this); - accessor.setCanAccessSystemClipboard((InputEvent) that, b); - } that.isSystemGenerated = this.isSystemGenerated; } void dispatched() { - if (this instanceof InputEvent) { - AWTAccessor.getInputEventAccessor(). - setCanAccessSystemClipboard((InputEvent) this, false); - } } } // class AWTEvent diff --git a/src/java.desktop/share/classes/java/awt/Desktop.java b/src/java.desktop/share/classes/java/awt/Desktop.java index 3f73fa6cd81..43bd1f9c11c 100644 --- a/src/java.desktop/share/classes/java/awt/Desktop.java +++ b/src/java.desktop/share/classes/java/awt/Desktop.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -272,6 +272,8 @@ public class Desktop { } } + private static Desktop desktop; + /** * Returns the {@code Desktop} instance of the current * desktop context. On some platforms the Desktop API may not be @@ -292,12 +294,8 @@ public class Desktop { "supported on the current platform"); } - sun.awt.AppContext context = sun.awt.AppContext.getAppContext(); - Desktop desktop = (Desktop)context.get(Desktop.class); - if (desktop == null) { desktop = new Desktop(); - context.put(Desktop.class, desktop); } return desktop; diff --git a/src/java.desktop/share/classes/java/awt/Dialog.java b/src/java.desktop/share/classes/java/awt/Dialog.java index 83aa89b9bf7..038aa5b65e3 100644 --- a/src/java.desktop/share/classes/java/awt/Dialog.java +++ b/src/java.desktop/share/classes/java/awt/Dialog.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,6 @@ import javax.accessibility.AccessibleRole; import javax.accessibility.AccessibleState; import javax.accessibility.AccessibleStateSet; -import sun.awt.AppContext; import sun.awt.SunToolkit; import sun.awt.util.IdentityArrayList; @@ -1013,30 +1012,12 @@ public class Dialog extends Window { if (!isModal()) { conditionalShow(null, null); } else { - AppContext showAppContext = AppContext.getAppContext(); - AtomicLong time = new AtomicLong(); Component predictedFocusOwner = null; try { predictedFocusOwner = getMostRecentFocusOwner(); if (conditionalShow(predictedFocusOwner, time)) { modalFilter = ModalEventFilter.createFilterForDialog(this); - // if this dialog is toolkit-modal, the filter should be added - // to all EDTs (for all AppContexts) - if (modalityType == ModalityType.TOOLKIT_MODAL) { - for (AppContext appContext : AppContext.getAppContexts()) { - if (appContext == showAppContext) { - continue; - } - EventQueue eventQueue = (EventQueue)appContext.get(AppContext.EVENT_QUEUE_KEY); - // it may occur that EDT for appContext hasn't been started yet, so - // we post an empty invocation event to trigger EDT initialization - eventQueue.postEvent(new InvocationEvent(this, () -> {})); - EventDispatchThread edt = eventQueue.getDispatchThread(); - edt.addEventFilter(modalFilter); - } - } - modalityPushed(); try { EventQueue eventQueue = Toolkit.getDefaultToolkit().getSystemEventQueue(); @@ -1047,19 +1028,6 @@ public class Dialog extends Window { } finally { modalityPopped(); } - - // if this dialog is toolkit-modal, its filter must be removed - // from all EDTs (for all AppContexts) - if (modalityType == ModalityType.TOOLKIT_MODAL) { - for (AppContext appContext : AppContext.getAppContexts()) { - if (appContext == showAppContext) { - continue; - } - EventQueue eventQueue = (EventQueue)appContext.get(AppContext.EVENT_QUEUE_KEY); - EventDispatchThread edt = 
eventQueue.getDispatchThread(); - edt.removeEventFilter(modalFilter); - } - } } } finally { if (predictedFocusOwner != null) { @@ -1482,8 +1450,7 @@ public class Dialog extends Window { return getDocumentRoot() == w.getDocumentRoot(); } case APPLICATION_MODAL: - return !w.isModalExcluded(ModalExclusionType.APPLICATION_EXCLUDE) && - (appContext == w.appContext); + return !w.isModalExcluded(ModalExclusionType.APPLICATION_EXCLUDE); case TOOLKIT_MODAL: return !w.isModalExcluded(ModalExclusionType.TOOLKIT_EXCLUDE); } diff --git a/src/java.desktop/share/classes/java/awt/EventDispatchThread.java b/src/java.desktop/share/classes/java/awt/EventDispatchThread.java index b817ca12ece..1a991741fab 100644 --- a/src/java.desktop/share/classes/java/awt/EventDispatchThread.java +++ b/src/java.desktop/share/classes/java/awt/EventDispatchThread.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -203,8 +203,8 @@ class EventDispatchThread extends Thread { eq.dispatchEvent(event); } catch (InterruptedException interruptedException) { - doDispatch = false; // AppContext.dispose() interrupts all - // Threads in the AppContext + // keep this catch case for compatibility + doDispatch = false; } catch (Throwable e) { processException(e); diff --git a/src/java.desktop/share/classes/java/awt/KeyboardFocusManager.java b/src/java.desktop/share/classes/java/awt/KeyboardFocusManager.java index 06932d33f8a..9b55e754a64 100644 --- a/src/java.desktop/share/classes/java/awt/KeyboardFocusManager.java +++ b/src/java.desktop/share/classes/java/awt/KeyboardFocusManager.java @@ -2264,15 +2264,14 @@ public abstract class KeyboardFocusManager temporary, descendant, cause); // Fix 5028014. Rolled out. 
// SunToolkit.postPriorityEvent(currentFocusOwnerEvent); - SunToolkit.postEvent(currentFocusOwner.appContext, - currentFocusOwnerEvent); + SunToolkit.postEvent(currentFocusOwnerEvent); } FocusEvent newFocusOwnerEvent = new FocusEvent(descendant, FocusEvent.FOCUS_GAINED, temporary, currentFocusOwner, cause); // Fix 5028014. Rolled out. // SunToolkit.postPriorityEvent(newFocusOwnerEvent); - SunToolkit.postEvent(descendant.appContext, newFocusOwnerEvent); + SunToolkit.postEvent(newFocusOwnerEvent); if (focusLog.isLoggable(PlatformLogger.Level.FINEST)) focusLog.finest("2. SNFH_HANDLED for {0}", String.valueOf(descendant)); diff --git a/src/java.desktop/share/classes/java/awt/MouseInfo.java b/src/java.desktop/share/classes/java/awt/MouseInfo.java index 6b913adf06e..7a30243b06c 100644 --- a/src/java.desktop/share/classes/java/awt/MouseInfo.java +++ b/src/java.desktop/share/classes/java/awt/MouseInfo.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,7 @@ public class MouseInfo { * * @throws HeadlessException if GraphicsEnvironment.isHeadless() returns true * @return location of the mouse pointer + * @see GraphicsConfiguration * @since 1.5 */ public static PointerInfo getPointerInfo() throws HeadlessException { diff --git a/src/java.desktop/share/classes/java/awt/SentEvent.java b/src/java.desktop/share/classes/java/awt/SentEvent.java index 632b4ee85a8..eb85fa1453d 100644 --- a/src/java.desktop/share/classes/java/awt/SentEvent.java +++ b/src/java.desktop/share/classes/java/awt/SentEvent.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,6 @@ package java.awt; import java.io.Serial; -import sun.awt.AppContext; import sun.awt.SunToolkit; /** @@ -51,22 +50,16 @@ class SentEvent extends AWTEvent implements ActiveEvent { boolean dispatched; private AWTEvent nested; - @SuppressWarnings("serial") // Not statically typed as Serializable - private AppContext toNotify; SentEvent() { this(null); } SentEvent(AWTEvent nested) { - this(nested, null); - } - SentEvent(AWTEvent nested, AppContext toNotify) { super((nested != null) ? nested.getSource() : Toolkit.getDefaultToolkit(), ID); this.nested = nested; - this.toNotify = toNotify; } public void dispatch() { @@ -76,9 +69,6 @@ class SentEvent extends AWTEvent implements ActiveEvent { } } finally { dispatched = true; - if (toNotify != null) { - SunToolkit.postEvent(toNotify, new SentEvent()); - } synchronized (this) { notifyAll(); } @@ -86,9 +76,6 @@ class SentEvent extends AWTEvent implements ActiveEvent { } final void dispose() { dispatched = true; - if (toNotify != null) { - SunToolkit.postEvent(toNotify, new SentEvent()); - } synchronized (this) { notifyAll(); } diff --git a/src/java.desktop/share/classes/java/awt/SequencedEvent.java b/src/java.desktop/share/classes/java/awt/SequencedEvent.java index 13ec5317822..25fe72e1787 100644 --- a/src/java.desktop/share/classes/java/awt/SequencedEvent.java +++ b/src/java.desktop/share/classes/java/awt/SequencedEvent.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,12 +29,11 @@ import java.io.Serial; import java.util.LinkedList; import sun.awt.AWTAccessor; -import sun.awt.AppContext; import sun.awt.SunToolkit; /** * A mechanism for ensuring that a series of AWTEvents are executed in a - * precise order, even across multiple AppContexts. The nested events will be + * precise order. The nested events will be * dispatched in the order in which their wrapping SequencedEvents were * constructed. The only exception to this rule is if the peer of the target of * the nested event was destroyed (with a call to Component.removeNotify) @@ -57,7 +56,6 @@ class SequencedEvent extends AWTEvent implements ActiveEvent { private final AWTEvent nested; @SuppressWarnings("serial") // Not statically typed as Serializable - private AppContext appContext; private boolean disposed; private final LinkedList pendingEvents = new LinkedList<>(); @@ -145,7 +143,6 @@ class SequencedEvent extends AWTEvent implements ActiveEvent { * dispatched or disposed. If this method is invoked before all previous nested events * have been dispatched, then this method blocks until such a point is * reached. - * While waiting disposes nested events to disposed AppContext * * NOTE: Locking protocol. Since dispose() can get EventQueue lock, * dispatch() shall never call dispose() while holding the lock on the list, @@ -154,8 +151,6 @@ class SequencedEvent extends AWTEvent implements ActiveEvent { */ public final void dispatch() { try { - appContext = AppContext.getAppContext(); - if (getFirst() != this) { if (EventQueue.isDispatchThread()) { if (Thread.currentThread() instanceof EventDispatchThread) { @@ -201,19 +196,6 @@ class SequencedEvent extends AWTEvent implements ActiveEvent { } } - /** - * true only if event exists and nested source appContext is disposed. 
- */ - private static final boolean isOwnerAppContextDisposed(SequencedEvent se) { - if (se != null) { - Object target = se.nested.getSource(); - if (target instanceof Component) { - return ((Component)target).appContext.isDisposed(); - } - } - return false; - } - /** * Sequenced events are dispatched in order, so we cannot dispatch * until we are the first sequenced event in the queue (i.e. it's our @@ -224,26 +206,13 @@ class SequencedEvent extends AWTEvent implements ActiveEvent { if (disposed) { return true; } - // getFirstWithContext can dispose this - return this == getFirstWithContext() || disposed; + return this == getFirst(); } private static final synchronized SequencedEvent getFirst() { return list.getFirst(); } - /* Disposes all events from disposed AppContext - * return first valid event - */ - private static final SequencedEvent getFirstWithContext() { - SequencedEvent first = getFirst(); - while(isOwnerAppContextDisposed(first)) { - first.dispose(); - first = getFirst(); - } - return first; - } - /** * Disposes of this instance. This method is invoked once the nested event * has been dispatched and handled, or when the peer of the target of the @@ -283,12 +252,12 @@ class SequencedEvent extends AWTEvent implements ActiveEvent { } } // Wake up waiting threads - if (next != null && next.appContext != null) { - SunToolkit.postEvent(next.appContext, new SentEvent()); + if (next != null) { + SunToolkit.postEvent(new SentEvent()); } for(AWTEvent e : pendingEvents) { - SunToolkit.postEvent(appContext, e); + SunToolkit.postEvent(e); } } } diff --git a/src/java.desktop/share/classes/java/awt/Toolkit.java b/src/java.desktop/share/classes/java/awt/Toolkit.java index 1ca5cdd8112..4fc9dc8e82d 100644 --- a/src/java.desktop/share/classes/java/awt/Toolkit.java +++ b/src/java.desktop/share/classes/java/awt/Toolkit.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,7 +72,6 @@ import java.util.stream.Collectors; import javax.accessibility.AccessibilityProvider; import sun.awt.AWTAccessor; -import sun.awt.AppContext; import sun.awt.HeadlessToolkit; import sun.awt.PeerEvent; import sun.awt.PlatformGraphicsInfo; @@ -2046,8 +2045,6 @@ public abstract class Toolkit { @SuppressWarnings("serial") private static class DesktopPropertyChangeSupport extends PropertyChangeSupport { - private static final StringBuilder PROP_CHANGE_SUPPORT_KEY = - new StringBuilder("desktop property change support key"); private final Object source; public DesktopPropertyChangeSupport(Object sourceBean) { @@ -2055,16 +2052,14 @@ public abstract class Toolkit { source = sourceBean; } + private static PropertyChangeSupport pcs; @Override public synchronized void addPropertyChangeListener( String propertyName, PropertyChangeListener listener) { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); if (null == pcs) { pcs = new PropertyChangeSupport(source); - AppContext.getAppContext().put(PROP_CHANGE_SUPPORT_KEY, pcs); } pcs.addPropertyChangeListener(propertyName, listener); } @@ -2074,8 +2069,6 @@ public abstract class Toolkit { String propertyName, PropertyChangeListener listener) { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); if (null != pcs) { pcs.removePropertyChangeListener(propertyName, listener); } @@ -2084,8 +2077,6 @@ public abstract class Toolkit { @Override public synchronized PropertyChangeListener[] getPropertyChangeListeners() { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); if (null != pcs) { return pcs.getPropertyChangeListeners(); } 
else { @@ -2096,8 +2087,6 @@ public abstract class Toolkit { @Override public synchronized PropertyChangeListener[] getPropertyChangeListeners(String propertyName) { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); if (null != pcs) { return pcs.getPropertyChangeListeners(propertyName); } else { @@ -2107,19 +2096,14 @@ public abstract class Toolkit { @Override public synchronized void addPropertyChangeListener(PropertyChangeListener listener) { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); if (null == pcs) { pcs = new PropertyChangeSupport(source); - AppContext.getAppContext().put(PROP_CHANGE_SUPPORT_KEY, pcs); } pcs.addPropertyChangeListener(listener); } @Override public synchronized void removePropertyChangeListener(PropertyChangeListener listener) { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); if (null != pcs) { pcs.removePropertyChangeListener(listener); } @@ -2131,33 +2115,16 @@ public abstract class Toolkit { */ @Override public void firePropertyChange(final PropertyChangeEvent evt) { + if (pcs == null) { + return; + } Object oldValue = evt.getOldValue(); Object newValue = evt.getNewValue(); String propertyName = evt.getPropertyName(); if (oldValue != null && newValue != null && oldValue.equals(newValue)) { return; } - Runnable updater = new Runnable() { - public void run() { - PropertyChangeSupport pcs = (PropertyChangeSupport) - AppContext.getAppContext().get(PROP_CHANGE_SUPPORT_KEY); - if (null != pcs) { - pcs.firePropertyChange(evt); - } - } - }; - final AppContext currentAppContext = AppContext.getAppContext(); - for (AppContext appContext : AppContext.getAppContexts()) { - if (null == appContext || appContext.isDisposed()) { - continue; - } - if (currentAppContext == appContext) { - updater.run(); - } else { - final PeerEvent e = new PeerEvent(source, 
updater, PeerEvent.ULTIMATE_PRIORITY_EVENT); - SunToolkit.postEvent(appContext, e); - } - } + pcs.firePropertyChange(evt); } } diff --git a/src/java.desktop/share/classes/java/awt/WaitDispatchSupport.java b/src/java.desktop/share/classes/java/awt/WaitDispatchSupport.java index 71e8b3086a1..3e567f538b1 100644 --- a/src/java.desktop/share/classes/java/awt/WaitDispatchSupport.java +++ b/src/java.desktop/share/classes/java/awt/WaitDispatchSupport.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -211,8 +211,6 @@ class WaitDispatchSupport implements SecondaryLoop { } }, interval); } - // Dispose SequencedEvent we are dispatching on the current - // AppContext, to prevent us from hang - see 4531693 for details SequencedEvent currentSE = KeyboardFocusManager. getCurrentKeyboardFocusManager().getCurrentSequencedEvent(); if (currentSE != null) { diff --git a/src/java.desktop/share/classes/java/awt/Window.java b/src/java.desktop/share/classes/java/awt/Window.java index b41409a138e..23aefd8860d 100644 --- a/src/java.desktop/share/classes/java/awt/Window.java +++ b/src/java.desktop/share/classes/java/awt/Window.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,6 @@ import javax.accessibility.AccessibleState; import javax.accessibility.AccessibleStateSet; import sun.awt.AWTAccessor; -import sun.awt.AppContext; import sun.awt.DebugSettings; import sun.awt.SunToolkit; import sun.awt.util.IdentityArrayList; @@ -259,7 +258,7 @@ public class Window extends Container implements Accessible { /** * Contains all the windows that have a peer object associated, * i. e. between addNotify() and removeNotify() calls. The list - * of all Window instances can be obtained from AppContext object. + * of all Window instances can be obtained from {@link #getWindows()} * * @since 1.6 */ @@ -275,7 +274,7 @@ public class Window extends Container implements Accessible { new Vector>(); /* - * We insert a weak reference into the Vector of all Windows in AppContext + * We insert a weak reference into the Vector of all Windows * instead of 'this' so that garbage collection can still take place * correctly. 
*/ @@ -427,11 +426,9 @@ public class Window extends Container implements Accessible { static class WindowDisposerRecord implements sun.java2d.DisposerRecord { WeakReference owner; final WeakReference weakThis; - final WeakReference context; - WindowDisposerRecord(AppContext context, Window victim) { + WindowDisposerRecord(Window victim) { weakThis = victim.weakThis; - this.context = new WeakReference(context); } public void updateOwner() { @@ -448,10 +445,7 @@ public class Window extends Container implements Accessible { parent.removeOwnedWindow(weakThis); } } - AppContext ac = context.get(); - if (null != ac) { - Window.removeFromWindowList(ac, weakThis); - } + Window.removeFromWindowList(weakThis); } } @@ -499,7 +493,7 @@ public class Window extends Container implements Accessible { } modalExclusionType = Dialog.ModalExclusionType.NO_EXCLUDE; - disposerRecord = new WindowDisposerRecord(appContext, this); + disposerRecord = new WindowDisposerRecord(this); sun.java2d.Disposer.addRecord(anchor, disposerRecord); SunToolkit.checkAndSetPolicy(this); @@ -1489,34 +1483,6 @@ public class Window extends Container implements Accessible { } } - private static Window[] getWindows(AppContext appContext) { - synchronized (Window.class) { - Window[] realCopy; - @SuppressWarnings("unchecked") - Vector> windowList = - (Vector>)appContext.get(Window.class); - if (windowList != null) { - int fullSize = windowList.size(); - int realSize = 0; - Window[] fullCopy = new Window[fullSize]; - for (int i = 0; i < fullSize; i++) { - Window w = windowList.get(i).get(); - if (w != null) { - fullCopy[realSize++] = w; - } - } - if (fullSize != realSize) { - realCopy = Arrays.copyOf(fullCopy, realSize); - } else { - realCopy = fullCopy; - } - } else { - realCopy = new Window[0]; - } - return realCopy; - } - } - /** * Returns an array of all {@code Window}s, both owned and ownerless, * created by this application. 
@@ -1534,7 +1500,24 @@ public class Window extends Container implements Accessible { * @since 1.6 */ public static Window[] getWindows() { - return getWindows(AppContext.getAppContext()); + synchronized (Window.class) { + Window[] realCopy; + int fullSize = windowList.size(); + int realSize = 0; + Window[] fullCopy = new Window[fullSize]; + for (int i = 0; i < fullSize; i++) { + Window w = windowList.get(i).get(); + if (w != null) { + fullCopy[realSize++] = w; + } + } + if (fullSize != realSize) { + realCopy = Arrays.copyOf(fullCopy, realSize); + } else { + realCopy = fullCopy; + } + return realCopy; + } } /** @@ -2746,30 +2729,22 @@ public class Window extends Container implements Accessible { child.disposerRecord.updateOwner(); } + private static final Vector> windowList = new Vector<>(); + private void addToWindowList() { synchronized (Window.class) { - @SuppressWarnings("unchecked") - Vector> windowList = (Vector>)appContext.get(Window.class); - if (windowList == null) { - windowList = new Vector>(); - appContext.put(Window.class, windowList); - } windowList.add(weakThis); } } - private static void removeFromWindowList(AppContext context, WeakReference weakThis) { + private static void removeFromWindowList(WeakReference weakThis) { synchronized (Window.class) { - @SuppressWarnings("unchecked") - Vector> windowList = (Vector>)context.get(Window.class); - if (windowList != null) { - windowList.remove(weakThis); - } + windowList.remove(weakThis); } } private void removeFromWindowList() { - removeFromWindowList(appContext, weakThis); + removeFromWindowList(weakThis); } /** @@ -2909,7 +2884,7 @@ public class Window extends Container implements Accessible { weakThis = new WeakReference<>(this); anchor = new Object(); - disposerRecord = new WindowDisposerRecord(appContext, this); + disposerRecord = new WindowDisposerRecord(this); sun.java2d.Disposer.addRecord(anchor, disposerRecord); addToWindowList(); diff --git 
a/src/java.desktop/share/classes/java/awt/event/InputEvent.java b/src/java.desktop/share/classes/java/awt/event/InputEvent.java index 9f1172916df..7517ea810e3 100644 --- a/src/java.desktop/share/classes/java/awt/event/InputEvent.java +++ b/src/java.desktop/share/classes/java/awt/event/InputEvent.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -309,13 +309,6 @@ public abstract sealed class InputEvent extends ComponentEvent */ int modifiers; - /* - * A flag that indicates that this instance can be used to access - * the system clipboard. - * This should be false in a headless environment, true in a headful one. - */ - private transient boolean canAccessSystemClipboard; - static { /* ensure that the necessary native libraries are loaded */ NativeLibLoader.loadLibraries(); @@ -328,15 +321,6 @@ public abstract sealed class InputEvent extends ComponentEvent return InputEvent.getButtonDownMasks(); } - public boolean canAccessSystemClipboard(InputEvent event) { - return event.canAccessSystemClipboard; - } - - @Override - public void setCanAccessSystemClipboard(InputEvent event, - boolean canAccessSystemClipboard) { - event.canAccessSystemClipboard = canAccessSystemClipboard; - } }); } @@ -381,11 +365,6 @@ public abstract sealed class InputEvent extends ComponentEvent super(source, id); this.when = when; this.modifiers = modifiers; - canAccessSystemClipboard = canAccessSystemClipboard(); - } - - private boolean canAccessSystemClipboard() { - return !GraphicsEnvironment.isHeadless(); } /** diff --git a/src/java.desktop/share/classes/java/awt/event/InputMethodEvent.java b/src/java.desktop/share/classes/java/awt/event/InputMethodEvent.java index 12ad1a03171..429d0b819f4 100644 --- 
a/src/java.desktop/share/classes/java/awt/event/InputMethodEvent.java +++ b/src/java.desktop/share/classes/java/awt/event/InputMethodEvent.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,6 @@ import java.text.AttributedCharacterIterator; import java.text.CharacterIterator; import sun.awt.AWTAccessor; -import sun.awt.AppContext; import sun.awt.SunToolkit; /** @@ -444,8 +443,7 @@ public class InputMethodEvent extends AWTEvent { // throw the IllegalArgumentException to conform to EventObject spec throw new IllegalArgumentException("null source"); } - AppContext appContext = SunToolkit.targetToAppContext(source); - EventQueue eventQueue = SunToolkit.getSystemEventQueueImplPP(appContext); + EventQueue eventQueue = SunToolkit.getSystemEventQueueImplPP(); return AWTAccessor.getEventQueueAccessor().getMostRecentEventTime(eventQueue); } } diff --git a/src/java.desktop/share/classes/java/awt/event/WindowEvent.java b/src/java.desktop/share/classes/java/awt/event/WindowEvent.java index d898860f153..b2dbdb4f13f 100644 --- a/src/java.desktop/share/classes/java/awt/event/WindowEvent.java +++ b/src/java.desktop/share/classes/java/awt/event/WindowEvent.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,6 @@ import java.awt.Window; import java.io.Serial; import java.lang.annotation.Native; -import sun.awt.AppContext; import sun.awt.SunToolkit; /** @@ -324,21 +323,14 @@ public class WindowEvent extends ComponentEvent { * WINDOW_LOST_FOCUS event, this is the Window that gained activation or * focus. For any other type of WindowEvent, or if the focus or activation * change occurs with a native application, with a Java application in a - * different VM or context, or with no other Window, null is returned. + * different VM, or with no other Window, null is returned. * * @return the other Window involved in the focus or activation change, or * null * @since 1.4 */ public Window getOppositeWindow() { - if (opposite == null) { - return null; - } - - return (SunToolkit.targetToAppContext(opposite) == - AppContext.getAppContext()) - ? opposite - : null; + return opposite; } /** diff --git a/src/java.desktop/share/classes/java/awt/geom/AffineTransform.java b/src/java.desktop/share/classes/java/awt/geom/AffineTransform.java index 9abc55d8e6f..a6869369714 100644 --- a/src/java.desktop/share/classes/java/awt/geom/AffineTransform.java +++ b/src/java.desktop/share/classes/java/awt/geom/AffineTransform.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1137,7 +1137,7 @@ public class AffineTransform implements Cloneable, java.io.Serializable { * The values are stored in the array as * { m00 m10 m01 m11 m02 m12 }. 
* An array of 4 doubles can also be specified, in which case only the - * first four elements representing the non-transform + * first four elements representing the non-translation * parts of the array are retrieved and the values are stored into * the array as { m00 m10 m01 m11 } * @param flatmatrix the double array used to store the returned diff --git a/src/java.desktop/share/classes/java/beans/beancontext/BeanContextSupport.java b/src/java.desktop/share/classes/java/beans/beancontext/BeanContextSupport.java index 3aa77048f8f..884b36d4b12 100644 --- a/src/java.desktop/share/classes/java/beans/beancontext/BeanContextSupport.java +++ b/src/java.desktop/share/classes/java/beans/beancontext/BeanContextSupport.java @@ -422,42 +422,54 @@ public class BeanContextSupport extends BeanContextChildSupport BeanContextChild cbcc = getChildBeanContextChild(targetChild); BeanContextChild bccp = null; - synchronized(targetChild) { + if (targetChild instanceof BeanContextProxy) { + bccp = ((BeanContextProxy)targetChild).getBeanContextProxy(); - if (targetChild instanceof BeanContextProxy) { - bccp = ((BeanContextProxy)targetChild).getBeanContextProxy(); + if (bccp == null) throw new NullPointerException("BeanContextPeer.getBeanContextProxy()"); + } - if (bccp == null) throw new NullPointerException("BeanContextPeer.getBeanContextProxy()"); - } + BCSChild bcsc = createBCSChild(targetChild, bccp); + BCSChild pbcsc = null; - BCSChild bcsc = createBCSChild(targetChild, bccp); - BCSChild pbcsc = null; + synchronized (children) { + children.put(targetChild, bcsc); - synchronized (children) { - children.put(targetChild, bcsc); + if (bccp != null) children.put(bccp, pbcsc = createBCSChild(bccp, targetChild)); + } - if (bccp != null) children.put(bccp, pbcsc = createBCSChild(bccp, targetChild)); - } + if (cbcc != null) synchronized(cbcc) { + try { + cbcc.setBeanContext(getBeanContextPeer()); + } catch (PropertyVetoException pve) { - if (cbcc != null) synchronized(cbcc) { - try { - 
cbcc.setBeanContext(getBeanContextPeer()); - } catch (PropertyVetoException pve) { + synchronized (children) { + children.remove(targetChild); - synchronized (children) { - children.remove(targetChild); - - if (bccp != null) children.remove(bccp); - } - - throw new IllegalStateException(); + if (bccp != null) children.remove(bccp); } - cbcc.addPropertyChangeListener("beanContext", childPCL); - cbcc.addVetoableChangeListener("beanContext", childVCL); + throw new IllegalStateException(); } - Visibility v = getChildVisibility(targetChild); + cbcc.addPropertyChangeListener("beanContext", childPCL); + cbcc.addVetoableChangeListener("beanContext", childVCL); + } + + Visibility v = getChildVisibility(targetChild); + + if (v != null) { + if (okToUseGui) + v.okToUseGui(); + else + v.dontUseGui(); + } + + if (getChildSerializable(targetChild) != null) serializable++; + + childJustAddedHook(targetChild, bcsc); + + if (bccp != null) { + v = getChildVisibility(bccp); if (v != null) { if (okToUseGui) @@ -466,26 +478,9 @@ public class BeanContextSupport extends BeanContextChildSupport v.dontUseGui(); } - if (getChildSerializable(targetChild) != null) serializable++; - - childJustAddedHook(targetChild, bcsc); - - if (bccp != null) { - v = getChildVisibility(bccp); - - if (v != null) { - if (okToUseGui) - v.okToUseGui(); - else - v.dontUseGui(); - } - - if (getChildSerializable(bccp) != null) serializable++; - - childJustAddedHook(bccp, pbcsc); - } - + if (getChildSerializable(bccp) != null) serializable++; + childJustAddedHook(bccp, pbcsc); } // The specification requires that we fire a notification of the change @@ -536,42 +531,40 @@ public class BeanContextSupport extends BeanContextChildSupport // we are required to notify the child that it is no longer nested here if // it implements java.beans.beancontext.BeanContextChild - synchronized(targetChild) { - if (callChildSetBC) { - BeanContextChild cbcc = getChildBeanContextChild(targetChild); - if (cbcc != null) 
synchronized(cbcc) { - cbcc.removePropertyChangeListener("beanContext", childPCL); - cbcc.removeVetoableChangeListener("beanContext", childVCL); - - try { - cbcc.setBeanContext(null); - } catch (PropertyVetoException pve1) { - cbcc.addPropertyChangeListener("beanContext", childPCL); - cbcc.addVetoableChangeListener("beanContext", childVCL); - throw new IllegalStateException(); - } + if (callChildSetBC) { + BeanContextChild cbcc = getChildBeanContextChild(targetChild); + if (cbcc != null) synchronized(cbcc) { + cbcc.removePropertyChangeListener("beanContext", childPCL); + cbcc.removeVetoableChangeListener("beanContext", childVCL); + try { + cbcc.setBeanContext(null); + } catch (PropertyVetoException pve1) { + cbcc.addPropertyChangeListener("beanContext", childPCL); + cbcc.addVetoableChangeListener("beanContext", childVCL); + throw new IllegalStateException(); } + } + } - synchronized (children) { - children.remove(targetChild); + synchronized (children) { + children.remove(targetChild); - if (bcsc.isProxyPeer()) { - pbcsc = children.get(peer = bcsc.getProxyPeer()); - children.remove(peer); - } + if (bcsc.isProxyPeer()) { + pbcsc = children.get(peer = bcsc.getProxyPeer()); + children.remove(peer); } + } - if (getChildSerializable(targetChild) != null) serializable--; + if (getChildSerializable(targetChild) != null) serializable--; - childJustRemovedHook(targetChild, bcsc); + childJustRemovedHook(targetChild, bcsc); - if (peer != null) { - if (getChildSerializable(peer) != null) serializable--; + if (peer != null) { + if (getChildSerializable(peer) != null) serializable--; - childJustRemovedHook(peer, pbcsc); - } + childJustRemovedHook(peer, pbcsc); } fireChildrenRemoved(new BeanContextMembershipEvent(getBeanContextPeer(), peer == null ? 
new Object[] { targetChild } : new Object[] { targetChild, peer } )); diff --git a/src/java.desktop/share/classes/javax/accessibility/AccessibleContext.java b/src/java.desktop/share/classes/javax/accessibility/AccessibleContext.java index 096ca3aef44..e7fc58b0825 100644 --- a/src/java.desktop/share/classes/javax/accessibility/AccessibleContext.java +++ b/src/java.desktop/share/classes/javax/accessibility/AccessibleContext.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,6 @@ import java.beans.PropertyChangeSupport; import java.util.Locale; import sun.awt.AWTAccessor; -import sun.awt.AppContext; /** * {@code AccessibleContext} represents the minimum information all accessible @@ -84,24 +83,8 @@ public abstract class AccessibleContext { */ protected AccessibleContext() {} - /** - * The {@code AppContext} that should be used to dispatch events for this - * {@code AccessibleContext}. 
- */ - private volatile AppContext targetAppContext; - static { AWTAccessor.setAccessibleContextAccessor(new AWTAccessor.AccessibleContextAccessor() { - @Override - public void setAppContext(AccessibleContext accessibleContext, AppContext appContext) { - accessibleContext.targetAppContext = appContext; - } - - @Override - public AppContext getAppContext(AccessibleContext accessibleContext) { - return accessibleContext.targetAppContext; - } - @Override public Object getNativeAXResource(AccessibleContext accessibleContext) { return accessibleContext.nativeAXResource; diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/Media.java b/src/java.desktop/share/classes/javax/print/attribute/standard/Media.java index 6b62f1dbecd..0cb9a4c30c5 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/Media.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/Media.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -89,6 +89,7 @@ public abstract class Media extends EnumSyntax * @return {@code true} if {@code object} is equivalent to this media * attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return object instanceof Media other && object.getClass() == this.getClass() && @@ -105,6 +106,7 @@ public abstract class Media extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return Media.class; } @@ -118,6 +120,7 @@ public abstract class Media extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "media"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaName.java b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaName.java index 9c1b4060a34..4adb2839759 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaName.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaName.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -107,6 +107,7 @@ public class MediaName extends Media implements Attribute { * * @return the string table */ + @Override protected String[] getStringTable() { return myStringTable.clone(); @@ -117,6 +118,7 @@ public class MediaName extends Media implements Attribute { * * @return the enumeration value table */ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaPrintableArea.java b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaPrintableArea.java index 72a58f2b697..a9cc2bba195 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaPrintableArea.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaPrintableArea.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -241,6 +241,7 @@ public final class MediaPrintableArea * @return {@code true} if {@code object} is equivalent to this media * margins attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { boolean ret = false; if (object instanceof MediaPrintableArea) { @@ -262,6 +263,7 @@ public final class MediaPrintableArea * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return MediaPrintableArea.class; } @@ -277,6 +279,7 @@ public final class MediaPrintableArea * * @return attribute category name */ + @Override public final String getName() { return "media-printable-area"; } @@ -304,6 +307,7 @@ public final class MediaPrintableArea /** * Returns a string version of this rectangular size attribute in mm. */ + @Override public String toString() { return(toString(MM, "mm")); } @@ -311,6 +315,7 @@ public final class MediaPrintableArea /** * Returns a hash code value for this attribute. */ + @Override public int hashCode() { return x + 37*y + 43*w + 47*h; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSize.java b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSize.java index 6a8db3b94c2..57c0d305809 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSize.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSize.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -243,6 +243,7 @@ public class MediaSize extends Size2DSyntax implements Attribute { * @return {@code true} if {@code object} is equivalent to this media size * attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof MediaSize); } @@ -257,6 +258,7 @@ public class MediaSize extends Size2DSyntax implements Attribute { * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return MediaSize.class; } @@ -270,6 +272,7 @@ public class MediaSize extends Size2DSyntax implements Attribute { * * @return attribute category name */ + @Override public final String getName() { return "media-size"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSizeName.java b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSizeName.java index f9dde495610..52b60b046bf 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSizeName.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaSizeName.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -608,6 +608,7 @@ public class MediaSizeName extends Media { /** * Returns the string table for class {@code MediaSizeName}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); @@ -616,6 +617,7 @@ public class MediaSizeName extends Media { /** * Returns the enumeration value table for class {@code MediaSizeName}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaTray.java b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaTray.java index 138aae6248a..bc20cbb8901 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/MediaTray.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/MediaTray.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -132,6 +132,7 @@ public class MediaTray extends Media implements Attribute { /** * Returns the string table for class {@code MediaTray}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); @@ -140,6 +141,7 @@ public class MediaTray extends Media implements Attribute { /** * Returns the enumeration value table for class {@code MediaTray}. */ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/MultipleDocumentHandling.java b/src/java.desktop/share/classes/javax/print/attribute/standard/MultipleDocumentHandling.java index 6d12597c88a..4937f379684 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/MultipleDocumentHandling.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/MultipleDocumentHandling.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -219,6 +219,7 @@ public class MultipleDocumentHandling extends EnumSyntax /** * Returns the string table for class {@code MultipleDocumentHandling}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); } @@ -227,6 +228,7 @@ public class MultipleDocumentHandling extends EnumSyntax * Returns the enumeration value table for class * {@code MultipleDocumentHandling}. */ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } @@ -242,6 +244,7 @@ public class MultipleDocumentHandling extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return MultipleDocumentHandling.class; } @@ -255,6 +258,7 @@ public class MultipleDocumentHandling extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "multiple-document-handling"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfDocuments.java b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfDocuments.java index 18b8144fb89..901907620f9 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfDocuments.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfDocuments.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,7 @@ public final class NumberOfDocuments extends IntegerSyntax * @return {@code true} if {@code object} is equivalent to this number of * documents attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof NumberOfDocuments); @@ -92,6 +93,7 @@ public final class NumberOfDocuments extends IntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return NumberOfDocuments.class; } @@ -105,6 +107,7 @@ public final class NumberOfDocuments extends IntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "number-of-documents"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfInterveningJobs.java b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfInterveningJobs.java index 54cb4b85e7a..480de3b1b47 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfInterveningJobs.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberOfInterveningJobs.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -78,6 +78,7 @@ public final class NumberOfInterveningJobs extends IntegerSyntax * @return {@code true} if {@code object} is equivalent to this number of * intervening jobs attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof NumberOfInterveningJobs); @@ -93,6 +94,7 @@ public final class NumberOfInterveningJobs extends IntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return NumberOfInterveningJobs.class; } @@ -106,6 +108,7 @@ public final class NumberOfInterveningJobs extends IntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "number-of-intervening-jobs"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUp.java b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUp.java index b3aabbc6cfd..0b019346c8e 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUp.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUp.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -145,6 +145,7 @@ public final class NumberUp extends IntegerSyntax * @return {@code true} if {@code object} is equivalent to this number up * attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof NumberUp); } @@ -159,6 +160,7 @@ public final class NumberUp extends IntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return NumberUp.class; } @@ -171,6 +173,7 @@ public final class NumberUp extends IntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "number-up"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUpSupported.java b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUpSupported.java index dfb79835e0f..cef65ed17a3 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUpSupported.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/NumberUpSupported.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -136,6 +136,7 @@ public final class NumberUpSupported extends SetOfIntegerSyntax * @return {@code true} if {@code object} is equivalent to this number up * supported attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof NumberUpSupported); @@ -151,6 +152,7 @@ public final class NumberUpSupported extends SetOfIntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return NumberUpSupported.class; } @@ -164,6 +166,7 @@ public final class NumberUpSupported extends SetOfIntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "number-up-supported"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/OrientationRequested.java b/src/java.desktop/share/classes/javax/print/attribute/standard/OrientationRequested.java index 253adfe1ff1..00e3de4a1de 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/OrientationRequested.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/OrientationRequested.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -150,6 +150,7 @@ public final class OrientationRequested extends EnumSyntax /** * Returns the string table for class {@code OrientationRequested}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -158,6 +159,7 @@ public final class OrientationRequested extends EnumSyntax * Returns the enumeration value table for class * {@code OrientationRequested}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -166,6 +168,7 @@ public final class OrientationRequested extends EnumSyntax * Returns the lowest integer value used by class * {@code OrientationRequested}. */ + @Override protected int getOffset() { return 3; } @@ -180,6 +183,7 @@ public final class OrientationRequested extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return OrientationRequested.class; } @@ -193,6 +197,7 @@ public final class OrientationRequested extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "orientation-requested"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/OutputDeviceAssigned.java b/src/java.desktop/share/classes/javax/print/attribute/standard/OutputDeviceAssigned.java index c94a105e92d..bea5cd35060 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/OutputDeviceAssigned.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/OutputDeviceAssigned.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -92,6 +92,7 @@ public final class OutputDeviceAssigned extends TextSyntax * @return {@code true} if {@code object} is equivalent to this output * device assigned attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof OutputDeviceAssigned); @@ -107,6 +108,7 @@ public final class OutputDeviceAssigned extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return OutputDeviceAssigned.class; } @@ -120,6 +122,7 @@ public final class OutputDeviceAssigned extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "output-device-assigned"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PDLOverrideSupported.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PDLOverrideSupported.java index 46b9e8493d3..d9862ff633b 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PDLOverrideSupported.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PDLOverrideSupported.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,6 +97,7 @@ public class PDLOverrideSupported extends EnumSyntax /** * Returns the string table for class {@code PDLOverrideSupported}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); } @@ -105,6 +106,7 @@ public class PDLOverrideSupported extends EnumSyntax * Returns the enumeration value table for class * {@code PDLOverrideSupported}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } @@ -119,6 +121,7 @@ public class PDLOverrideSupported extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PDLOverrideSupported.class; } @@ -132,6 +135,7 @@ public class PDLOverrideSupported extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "pdl-override-supported"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PageRanges.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PageRanges.java index 3ba0fa8e07e..74325a3eb2f 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PageRanges.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PageRanges.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -209,6 +209,7 @@ public final class PageRanges extends SetOfIntegerSyntax * @return {@code true} if {@code object} is equivalent to this page ranges * attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PageRanges); } @@ -223,6 +224,7 @@ public final class PageRanges extends SetOfIntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PageRanges.class; } @@ -235,6 +237,7 @@ public final class PageRanges extends SetOfIntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "page-ranges"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinute.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinute.java index 184339fb978..4d05663d1db 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinute.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -78,6 +78,7 @@ public final class PagesPerMinute extends IntegerSyntax * @return {@code true} if {@code object} is equivalent to this pages per * minute attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof PagesPerMinute); @@ -93,6 +94,7 @@ public final class PagesPerMinute extends IntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PagesPerMinute.class; } @@ -106,6 +108,7 @@ public final class PagesPerMinute extends IntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "pages-per-minute"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinuteColor.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinuteColor.java index 32f15a4f165..0dc3d0c9420 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinuteColor.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PagesPerMinuteColor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -91,6 +91,7 @@ public final class PagesPerMinuteColor extends IntegerSyntax * @return {@code true} if {@code object} is equivalent to this pages per * minute color attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PagesPerMinuteColor); @@ -106,6 +107,7 @@ public final class PagesPerMinuteColor extends IntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PagesPerMinuteColor.class; } @@ -119,6 +121,7 @@ public final class PagesPerMinuteColor extends IntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "pages-per-minute-color"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PresentationDirection.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PresentationDirection.java index d4ff8581a2d..c40b6f3390e 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PresentationDirection.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PresentationDirection.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -157,6 +157,7 @@ public final class PresentationDirection extends EnumSyntax /** * Returns the string table for class {@code PresentationDirection}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -165,6 +166,7 @@ public final class PresentationDirection extends EnumSyntax * Returns the enumeration value table for class * {@code PresentationDirection}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -179,6 +181,7 @@ public final class PresentationDirection extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PresentationDirection.class; } @@ -192,6 +195,7 @@ public final class PresentationDirection extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "presentation-direction"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrintQuality.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrintQuality.java index d00637fe23c..2e64522da7c 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrintQuality.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrintQuality.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,6 +100,7 @@ public class PrintQuality extends EnumSyntax /** * Returns the string table for class {@code PrintQuality}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); } @@ -107,6 +108,7 @@ public class PrintQuality extends EnumSyntax /** * Returns the enumeration value table for class {@code PrintQuality}. */ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } @@ -114,6 +116,7 @@ public class PrintQuality extends EnumSyntax /** * Returns the lowest integer value used by class {@code PrintQuality}. 
*/ + @Override protected int getOffset() { return 3; } @@ -128,6 +131,7 @@ public class PrintQuality extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrintQuality.class; } @@ -141,6 +145,7 @@ public class PrintQuality extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "print-quality"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterInfo.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterInfo.java index 4d8d2cf0601..39b8759d34d 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterInfo.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterInfo.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -87,6 +87,7 @@ public final class PrinterInfo extends TextSyntax * @return {@code true} if {@code object} is equivalent to this printer info * attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterInfo); } @@ -101,6 +102,7 @@ public final class PrinterInfo extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterInfo.class; } @@ -114,6 +116,7 @@ public final class PrinterInfo extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-info"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterIsAcceptingJobs.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterIsAcceptingJobs.java index 92e0d1fcfcd..f8a99d4db46 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterIsAcceptingJobs.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterIsAcceptingJobs.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,6 +100,7 @@ public final class PrinterIsAcceptingJobs extends EnumSyntax /** * Returns the string table for class {@code PrinterIsAcceptingJobs}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -108,6 +109,7 @@ public final class PrinterIsAcceptingJobs extends EnumSyntax * Returns the enumeration value table for class * {@code PrinterIsAcceptingJobs}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -122,6 +124,7 @@ public final class PrinterIsAcceptingJobs extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterIsAcceptingJobs.class; } @@ -135,6 +138,7 @@ public final class PrinterIsAcceptingJobs extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-is-accepting-jobs"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterLocation.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterLocation.java index a0cfed406ce..c23112c79d2 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterLocation.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterLocation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,6 +83,7 @@ public final class PrinterLocation extends TextSyntax * @return {@code true} if {@code object} is equivalent to this printer * location attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterLocation); } @@ -97,6 +98,7 @@ public final class PrinterLocation extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterLocation.class; } @@ -110,6 +112,7 @@ public final class PrinterLocation extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-location"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMakeAndModel.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMakeAndModel.java index b5fe4a6ee95..eff34c8bd59 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMakeAndModel.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMakeAndModel.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -82,6 +82,7 @@ public final class PrinterMakeAndModel extends TextSyntax * @return {@code true} if {@code object} is equivalent to this printer make * and model attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterMakeAndModel); @@ -97,6 +98,7 @@ public final class PrinterMakeAndModel extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterMakeAndModel.class; } @@ -110,6 +112,7 @@ public final class PrinterMakeAndModel extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-make-and-model"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMessageFromOperator.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMessageFromOperator.java index 9ce0a098b80..e125d4955b3 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMessageFromOperator.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMessageFromOperator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -95,6 +95,7 @@ public final class PrinterMessageFromOperator extends TextSyntax * @return {@code true} if {@code object} is equivalent to this printer * message from operator attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterMessageFromOperator); @@ -110,6 +111,7 @@ public final class PrinterMessageFromOperator extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterMessageFromOperator.class; } @@ -123,6 +125,7 @@ public final class PrinterMessageFromOperator extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-message-from-operator"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfo.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfo.java index 501ffdb6cfb..7e94507cf8c 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfo.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfo.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -88,6 +88,7 @@ public final class PrinterMoreInfo extends URISyntax * @return {@code true} if {@code object} is equivalent to this printer more * info attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterMoreInfo); @@ -103,6 +104,7 @@ public final class PrinterMoreInfo extends URISyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterMoreInfo.class; } @@ -116,6 +118,7 @@ public final class PrinterMoreInfo extends URISyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-more-info"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfoManufacturer.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfoManufacturer.java index b6f8b92fc83..be6952fa00b 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfoManufacturer.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterMoreInfoManufacturer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -88,6 +88,7 @@ public final class PrinterMoreInfoManufacturer extends URISyntax * @return {@code true} if {@code object} is equivalent to this printer more * info manufacturer attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterMoreInfoManufacturer); @@ -103,6 +104,7 @@ public final class PrinterMoreInfoManufacturer extends URISyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterMoreInfoManufacturer.class; } @@ -116,6 +118,7 @@ public final class PrinterMoreInfoManufacturer extends URISyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-more-info-manufacturer"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterName.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterName.java index 9a066c5d005..019ef3c8911 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterName.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterName.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -85,6 +85,7 @@ public final class PrinterName extends TextSyntax * @return {@code true} if {@code object} is equivalent to this printer name * attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterName); } @@ -99,6 +100,7 @@ public final class PrinterName extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterName.class; } @@ -112,6 +114,7 @@ public final class PrinterName extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-name"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterResolution.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterResolution.java index 3bba7feb751..a47d57209cd 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterResolution.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterResolution.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -108,6 +108,7 @@ public final class PrinterResolution extends ResolutionSyntax * @return {@code true} if {@code object} is equivalent to this printer * resolution attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof PrinterResolution); @@ -123,6 +124,7 @@ public final class PrinterResolution extends ResolutionSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterResolution.class; } @@ -136,6 +138,7 @@ public final class PrinterResolution extends ResolutionSyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-resolution"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterState.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterState.java index 9cbb3da9473..f2c921360a4 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterState.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterState.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,6 +114,7 @@ implements PrintServiceAttribute { /** * Returns the string table for class {@code PrinterState}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -121,6 +122,7 @@ implements PrintServiceAttribute { /** * Returns the enumeration value table for class {@code PrinterState}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -135,6 +137,7 @@ implements PrintServiceAttribute { * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterState.class; } @@ -148,6 +151,7 @@ implements PrintServiceAttribute { * * @return attribute category name */ + @Override public final String getName() { return "printer-state"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReason.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReason.java index 5c3b98ee127..8586713ed65 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReason.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReason.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -403,6 +403,7 @@ public class PrinterStateReason extends EnumSyntax implements Attribute { /** * Returns the string table for class {@code PrinterStateReason}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); } @@ -410,6 +411,7 @@ public class PrinterStateReason extends EnumSyntax implements Attribute { /** * Returns the enumeration value table for class {@code PrinterStateReason}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } @@ -424,6 +426,7 @@ public class PrinterStateReason extends EnumSyntax implements Attribute { * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterStateReason.class; } @@ -437,6 +440,7 @@ public class PrinterStateReason extends EnumSyntax implements Attribute { * * @return attribute category name */ + @Override public final String getName() { return "printer-state-reason"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReasons.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReasons.java index 8e46b145651..fca59ca4bba 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReasons.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterStateReasons.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -165,6 +165,7 @@ public final class PrinterStateReasons * {@link Severity Severity} * @since 1.5 */ + @Override public Severity put(PrinterStateReason reason, Severity severity) { if (reason == null) { throw new NullPointerException("reason is null"); @@ -185,6 +186,7 @@ public final class PrinterStateReasons * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterStateReasons.class; } @@ -198,6 +200,7 @@ public final class PrinterStateReasons * * @return attribute category name */ + @Override public final String getName() { return "printer-state-reasons"; } @@ -240,6 +243,7 @@ public final class PrinterStateReasons myEntrySet = entrySet; } + @Override public int size() { int result = 0; for (PrinterStateReason ignored : this) { @@ -248,6 +252,7 @@ public final class PrinterStateReasons return result; } + @Override public Iterator iterator() { return new PrinterStateReasonSetIterator(mySeverity, myEntrySet.iterator()); @@ -276,10 +281,12 @@ public final class PrinterStateReasons } } + @Override public boolean hasNext() { return myEntry != null; } + @Override public PrinterStateReason next() { if (myEntry == null) { throw new NoSuchElementException(); @@ -289,6 +296,7 @@ public final class PrinterStateReasons return result; } + @Override public void remove() { throw new UnsupportedOperationException(); } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterURI.java b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterURI.java index dfe0bc5cc1f..d6934aae172 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterURI.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/PrinterURI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -79,6 +79,7 @@ public final class PrinterURI extends URISyntax * @return {@code true} if {@code object} is equivalent to this * {@code PrinterURI} attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof PrinterURI); } @@ -93,6 +94,7 @@ public final class PrinterURI extends URISyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return PrinterURI.class; } @@ -106,6 +108,7 @@ public final class PrinterURI extends URISyntax * * @return attribute category name */ + @Override public final String getName() { return "printer-uri"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/QueuedJobCount.java b/src/java.desktop/share/classes/javax/print/attribute/standard/QueuedJobCount.java index c56eeb35a83..e44ead9b7cf 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/QueuedJobCount.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/QueuedJobCount.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -76,6 +76,7 @@ public final class QueuedJobCount extends IntegerSyntax * @return {@code true} if {@code object} is equivalent to this queued job * count attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals (object) && object instanceof QueuedJobCount); @@ -91,6 +92,7 @@ public final class QueuedJobCount extends IntegerSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return QueuedJobCount.class; } @@ -104,6 +106,7 @@ public final class QueuedJobCount extends IntegerSyntax * * @return attribute category name */ + @Override public final String getName() { return "queued-job-count"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/ReferenceUriSchemesSupported.java b/src/java.desktop/share/classes/javax/print/attribute/standard/ReferenceUriSchemesSupported.java index 9784742030b..426fe4db3c4 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/ReferenceUriSchemesSupported.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/ReferenceUriSchemesSupported.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -154,6 +154,7 @@ public class ReferenceUriSchemesSupported /** * Returns the string table for class {@code ReferenceUriSchemesSupported}. */ + @Override protected String[] getStringTable() { return myStringTable.clone(); } @@ -162,6 +163,7 @@ public class ReferenceUriSchemesSupported * Returns the enumeration value table for class * {@code ReferenceUriSchemesSupported}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return (EnumSyntax[])myEnumValueTable.clone(); } @@ -177,6 +179,7 @@ public class ReferenceUriSchemesSupported * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return ReferenceUriSchemesSupported.class; } @@ -191,6 +194,7 @@ public class ReferenceUriSchemesSupported * * @return attribute category name */ + @Override public final String getName() { return "reference-uri-schemes-supported"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/RequestingUserName.java b/src/java.desktop/share/classes/javax/print/attribute/standard/RequestingUserName.java index 1d233eb7602..c2efb2da1d6 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/RequestingUserName.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/RequestingUserName.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -92,6 +92,7 @@ public final class RequestingUserName extends TextSyntax * @return {@code true} if {@code object} is equivalent to this requesting * user name attribute, {@code false} otherwise */ + @Override public boolean equals(Object object) { return (super.equals(object) && object instanceof RequestingUserName); @@ -107,6 +108,7 @@ public final class RequestingUserName extends TextSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return RequestingUserName.class; } @@ -120,6 +122,7 @@ public final class RequestingUserName extends TextSyntax * * @return attribute category name */ + @Override public final String getName() { return "requesting-user-name"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/Severity.java b/src/java.desktop/share/classes/javax/print/attribute/standard/Severity.java index 7543d4503a9..c217f12292f 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/Severity.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/Severity.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -124,6 +124,7 @@ public final class Severity extends EnumSyntax implements Attribute { /** * Returns the string table for class {@code Severity}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -131,6 +132,7 @@ public final class Severity extends EnumSyntax implements Attribute { /** * Returns the enumeration value table for class {@code Severity}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -145,6 +147,7 @@ public final class Severity extends EnumSyntax implements Attribute { * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return Severity.class; } @@ -157,6 +160,7 @@ public final class Severity extends EnumSyntax implements Attribute { * * @return attribute category name */ + @Override public final String getName() { return "severity"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/SheetCollate.java b/src/java.desktop/share/classes/javax/print/attribute/standard/SheetCollate.java index 45ec5f5374b..1754a27c86f 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/SheetCollate.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/SheetCollate.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -191,6 +191,7 @@ public final class SheetCollate extends EnumSyntax /** * Returns the string table for class {@code SheetCollate}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -198,6 +199,7 @@ public final class SheetCollate extends EnumSyntax /** * Returns the enumeration value table for class {@code SheetCollate}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -212,6 +214,7 @@ public final class SheetCollate extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return SheetCollate.class; } @@ -225,6 +228,7 @@ public final class SheetCollate extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "sheet-collate"; } diff --git a/src/java.desktop/share/classes/javax/print/attribute/standard/Sides.java b/src/java.desktop/share/classes/javax/print/attribute/standard/Sides.java index 86d421045b0..0321cd876de 100644 --- a/src/java.desktop/share/classes/javax/print/attribute/standard/Sides.java +++ b/src/java.desktop/share/classes/javax/print/attribute/standard/Sides.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -183,6 +183,7 @@ public final class Sides extends EnumSyntax /** * Returns the string table for class {@code Sides}. */ + @Override protected String[] getStringTable() { return myStringTable; } @@ -190,6 +191,7 @@ public final class Sides extends EnumSyntax /** * Returns the enumeration value table for class {@code Sides}. 
*/ + @Override protected EnumSyntax[] getEnumValueTable() { return myEnumValueTable; } @@ -203,6 +205,7 @@ public final class Sides extends EnumSyntax * @return printing attribute class (category), an instance of class * {@link Class java.lang.Class} */ + @Override public final Class getCategory() { return Sides.class; } @@ -215,6 +218,7 @@ public final class Sides extends EnumSyntax * * @return attribute category name */ + @Override public final String getName() { return "sides"; } diff --git a/src/java.desktop/share/classes/javax/swing/AbstractButton.java b/src/java.desktop/share/classes/javax/swing/AbstractButton.java index d4112b5ed04..ad5f0eba3de 100644 --- a/src/java.desktop/share/classes/javax/swing/AbstractButton.java +++ b/src/java.desktop/share/classes/javax/swing/AbstractButton.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1070,13 +1070,13 @@ public abstract class AbstractButton extends JComponent implements ItemSelectabl Action oldValue = getAction(); if (action==null || !action.equals(a)) { action = a; - if (oldValue!=null) { + if (oldValue != null) { removeActionListener(oldValue); oldValue.removePropertyChangeListener(actionPropertyChangeListener); actionPropertyChangeListener = null; } configurePropertiesFromAction(action); - if (action!=null) { + if (action != null) { // Don't add if it is already a listener if (!isListener(ActionListener.class, action)) { addActionListener(action); diff --git a/src/java.desktop/share/classes/javax/swing/ActionPropertyChangeListener.java b/src/java.desktop/share/classes/javax/swing/ActionPropertyChangeListener.java index db12b6766b8..7d33e936b7b 100644 --- a/src/java.desktop/share/classes/javax/swing/ActionPropertyChangeListener.java +++ b/src/java.desktop/share/classes/javax/swing/ActionPropertyChangeListener.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,7 @@ abstract class ActionPropertyChangeListener while ((r = (OwnedWeakReference)queue.poll()) != null) { ActionPropertyChangeListener oldPCL = r.getOwner(); Action oldAction = oldPCL.getAction(); - if (oldAction!=null) { + if (oldAction != null) { oldAction.removePropertyChangeListener(oldPCL); } } diff --git a/src/java.desktop/share/classes/javax/swing/AncestorNotifier.java b/src/java.desktop/share/classes/javax/swing/AncestorNotifier.java index f3cfdd9d0ba..3606bdc9c0b 100644 --- a/src/java.desktop/share/classes/javax/swing/AncestorNotifier.java +++ b/src/java.desktop/share/classes/javax/swing/AncestorNotifier.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -213,7 +213,7 @@ class AncestorNotifier implements ComponentListener, PropertyChangeListener, Ser public void propertyChange(PropertyChangeEvent evt) { String s = evt.getPropertyName(); - if (s!=null && (s.equals("parent") || s.equals("ancestor"))) { + if (s != null && (s.equals("parent") || s.equals("ancestor"))) { JComponent component = (JComponent)evt.getSource(); if (evt.getNewValue() != null) { diff --git a/src/java.desktop/share/classes/javax/swing/ArrayTable.java b/src/java.desktop/share/classes/javax/swing/ArrayTable.java index 7c38f9fb577..4941264c0fe 100644 --- a/src/java.desktop/share/classes/javax/swing/ArrayTable.java +++ b/src/java.desktop/share/classes/javax/swing/ArrayTable.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -145,10 +145,10 @@ class ArrayTable implements Cloneable { */ public Object get(Object key) { Object value = null; - if (table !=null) { + if (table != null) { if (isArray()) { Object[] array = (Object[])table; - for (int i = 0; i extends JComponent implements Scrollable, Accessible } Rectangle newFirstRect = getCellBounds(newFirst,newFirst); Rectangle firstRect = getCellBounds(first,first); - if ((newFirstRect != null) && (firstRect!=null)) { + if ((newFirstRect != null) && (firstRect != null)) { while ( (newFirstRect.y + visibleRect.height < firstRect.y + firstRect.height) && (newFirstRect.y < firstRect.y) ) { diff --git a/src/java.desktop/share/classes/javax/swing/JOptionPane.java b/src/java.desktop/share/classes/javax/swing/JOptionPane.java index 707b9febf10..c41400b9b37 100644 --- a/src/java.desktop/share/classes/javax/swing/JOptionPane.java +++ b/src/java.desktop/share/classes/javax/swing/JOptionPane.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1656,7 +1656,7 @@ public class JOptionPane extends JComponent implements Accessible return getDesktopPaneForComponent(parentComponent.getParent()); } - private static final Object sharedFrameKey = JOptionPane.class; + private static volatile Frame sharedRootFrame; /** * Sets the frame to use for class methods in which a frame is @@ -1668,11 +1668,7 @@ public class JOptionPane extends JComponent implements Accessible * @param newRootFrame the default Frame to use */ public static void setRootFrame(Frame newRootFrame) { - if (newRootFrame != null) { - SwingUtilities.appContextPut(sharedFrameKey, newRootFrame); - } else { - SwingUtilities.appContextRemove(sharedFrameKey); - } + sharedRootFrame = newRootFrame; } /** @@ -1687,13 +1683,10 @@ public class JOptionPane extends JComponent implements Accessible * @see java.awt.GraphicsEnvironment#isHeadless */ public static Frame getRootFrame() throws HeadlessException { - Frame sharedFrame = - (Frame)SwingUtilities.appContextGet(sharedFrameKey); - if (sharedFrame == null) { - sharedFrame = SwingUtilities.getSharedOwnerFrame(); - SwingUtilities.appContextPut(sharedFrameKey, sharedFrame); + if (sharedRootFrame == null) { + sharedRootFrame = SwingUtilities.getSharedOwnerFrame(); } - return sharedFrame; + return sharedRootFrame; } /** diff --git a/src/java.desktop/share/classes/javax/swing/JPopupMenu.java b/src/java.desktop/share/classes/javax/swing/JPopupMenu.java index 2ecc7cf3b1e..f3d11a29b4a 100644 --- a/src/java.desktop/share/classes/javax/swing/JPopupMenu.java +++ b/src/java.desktop/share/classes/javax/swing/JPopupMenu.java @@ -975,7 +975,7 @@ public class JPopupMenu extends JComponent implements Accessible,MenuElement { if (newFrame != frame) { // Use the invoker's frame so that events // are propagated properly - if (newFrame!=null) { + if (newFrame != null) { this.frame = newFrame; if(popup != null) { setVisible(false); @@ -1012,7 +1012,7 @@ 
public class JPopupMenu extends JComponent implements Accessible,MenuElement { */ JPopupMenu getRootPopupMenu() { JPopupMenu mp = this; - while((mp!=null) && (mp.isPopupMenu()!=true) && + while((mp != null) && (mp.isPopupMenu()!=true) && (mp.getInvoker() != null) && (mp.getInvoker().getParent() instanceof JPopupMenu popupMenu) ) { @@ -1182,7 +1182,7 @@ public class JPopupMenu extends JComponent implements Accessible,MenuElement { private static Frame getFrame(Component c) { Component w = c; - while(!(w instanceof Frame) && (w!=null)) { + while(!(w instanceof Frame) && (w != null)) { w = w.getParent(); } return (Frame)w; diff --git a/src/java.desktop/share/classes/javax/swing/JTextField.java b/src/java.desktop/share/classes/javax/swing/JTextField.java index 0d66209d8af..d56e450aca4 100644 --- a/src/java.desktop/share/classes/javax/swing/JTextField.java +++ b/src/java.desktop/share/classes/javax/swing/JTextField.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -579,15 +579,15 @@ public class JTextField extends JTextComponent implements SwingConstants { = "the Action instance connected with this ActionEvent source") public void setAction(Action a) { Action oldValue = getAction(); - if (action==null || !action.equals(a)) { + if (action == null || !action.equals(a)) { action = a; - if (oldValue!=null) { + if (oldValue != null) { removeActionListener(oldValue); oldValue.removePropertyChangeListener(actionPropertyChangeListener); actionPropertyChangeListener = null; } configurePropertiesFromAction(action); - if (action!=null) { + if (action != null) { // Don't add if it is already a listener if (!isListener(ActionListener.class, action)) { addActionListener(action); diff --git a/src/java.desktop/share/classes/javax/swing/JTree.java b/src/java.desktop/share/classes/javax/swing/JTree.java index 622c55e2ede..6af2ca31c5b 100644 --- a/src/java.desktop/share/classes/javax/swing/JTree.java +++ b/src/java.desktop/share/classes/javax/swing/JTree.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -2087,7 +2087,7 @@ public class JTree extends JComponent implements Scrollable, Accessible value = expandedState.get(path); if (value == null || !value) return false; - } while( (path=path.getParentPath())!=null ); + } while( (path=path.getParentPath()) != null ); return true; } diff --git a/src/java.desktop/share/classes/javax/swing/KeyboardManager.java b/src/java.desktop/share/classes/javax/swing/KeyboardManager.java index 42a6403ddb5..54a60b5d172 100644 --- a/src/java.desktop/share/classes/javax/swing/KeyboardManager.java +++ b/src/java.desktop/share/classes/javax/swing/KeyboardManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -330,7 +330,7 @@ class KeyboardManager { return; } Hashtable keyMap = containerMap.get(topContainer); - if (keyMap!=null) { + if (keyMap != null) { Vector v = (Vector)keyMap.get(JMenuBar.class); if (v != null) { v.removeElement(mb); diff --git a/src/java.desktop/share/classes/javax/swing/PopupFactory.java b/src/java.desktop/share/classes/javax/swing/PopupFactory.java index 208177fc55e..7245820c289 100644 --- a/src/java.desktop/share/classes/javax/swing/PopupFactory.java +++ b/src/java.desktop/share/classes/javax/swing/PopupFactory.java @@ -925,7 +925,7 @@ public class PopupFactory { add to that, otherwise add to the window. 
*/ while (!(parent instanceof Window) && - (parent!=null)) { + (parent != null)) { parent = parent.getParent(); } diff --git a/src/java.desktop/share/classes/javax/swing/RepaintManager.java b/src/java.desktop/share/classes/javax/swing/RepaintManager.java index db8b13afa22..ab21e555919 100644 --- a/src/java.desktop/share/classes/javax/swing/RepaintManager.java +++ b/src/java.desktop/share/classes/javax/swing/RepaintManager.java @@ -32,7 +32,6 @@ import java.util.*; import java.util.concurrent.atomic.AtomicInteger; import sun.awt.AWTAccessor; -import sun.awt.AppContext; import sun.awt.DisplayChangedListener; import sun.awt.SunToolkit; import sun.java2d.SunGraphicsEnvironment; @@ -380,7 +379,13 @@ public class RepaintManager return; } if (invalidComponents != null) { - invalidComponents.remove(component); + int n = invalidComponents.size(); + for (int i = 0; i < n; i++) { + if (component == invalidComponents.get(i)) { + invalidComponents.remove(i); + break; + } + } } } @@ -1733,21 +1738,14 @@ public class RepaintManager } private static void scheduleDisplayChanges() { - // To avoid threading problems, we notify each RepaintManager + // To avoid threading problems, we notify the RepaintManager // on the thread it was created on. 
- for (AppContext context : AppContext.getAppContexts()) { - synchronized(context) { - if (!context.isDisposed()) { - EventQueue eventQueue = (EventQueue)context.get( - AppContext.EVENT_QUEUE_KEY); - if (eventQueue != null) { - eventQueue.postEvent(new InvocationEvent( - Toolkit.getDefaultToolkit(), - new DisplayChangedRunnable())); - } - } - } - } + EventQueue eventQueue = Toolkit.getDefaultToolkit().getSystemEventQueue(); + eventQueue.postEvent( + new InvocationEvent( + Toolkit.getDefaultToolkit(), + new DisplayChangedRunnable()) + ); } } diff --git a/src/java.desktop/share/classes/javax/swing/SwingUtilities.java b/src/java.desktop/share/classes/javax/swing/SwingUtilities.java index afe1c444c31..3603292b0ca 100644 --- a/src/java.desktop/share/classes/javax/swing/SwingUtilities.java +++ b/src/java.desktop/share/classes/javax/swing/SwingUtilities.java @@ -38,7 +38,6 @@ import javax.swing.event.MenuDragMouseEvent; import javax.swing.plaf.UIResource; import javax.swing.text.View; -import sun.awt.AppContext; import sun.awt.AWTAccessor; import sun.awt.AWTAccessor.MouseEventAccessor; @@ -506,7 +505,7 @@ public class SwingUtilities implements SwingConstants public static boolean isDescendingFrom(Component a,Component b) { if(a == b) return true; - for(Container p = a.getParent();p!=null;p=p.getParent()) + for(Container p = a.getParent(); p != null; p = p.getParent()) if(p == b) return true; return false; @@ -1986,26 +1985,6 @@ public class SwingUtilities implements SwingConstants return (WindowListener)sharedOwnerFrame; } - /* Don't make these AppContext accessors public or protected -- - * since AppContext is in sun.awt in 1.2, we shouldn't expose it - * even indirectly with a public API. - */ - // REMIND(aim): phase out use of 4 methods below since they - // are just private covers for AWT methods (?) 
- - static Object appContextGet(Object key) { - return AppContext.getAppContext().get(key); - } - - static void appContextPut(Object key, Object value) { - AppContext.getAppContext().put(key, value); - } - - static void appContextRemove(Object key) { - AppContext.getAppContext().remove(key); - } - - static Class loadSystemClass(String className) throws ClassNotFoundException { return Class.forName(className, true, Thread.currentThread(). getContextClassLoader()); diff --git a/src/java.desktop/share/classes/javax/swing/SwingWorker.java b/src/java.desktop/share/classes/javax/swing/SwingWorker.java index 75f1700bded..dae695b4868 100644 --- a/src/java.desktop/share/classes/javax/swing/SwingWorker.java +++ b/src/java.desktop/share/classes/javax/swing/SwingWorker.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,8 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import sun.awt.AppContext; +import sun.awt.util.ThreadGroupUtils; + import sun.swing.AccumulativeRunnable; /** @@ -266,7 +267,7 @@ public abstract class SwingWorker implements RunnableFuture { */ private AccumulativeRunnable doNotifyProgressChange; - private final AccumulativeRunnable doSubmit = getDoSubmit(); + private final AccumulativeRunnable doSubmit = new DoSubmitAccumulativeRunnable(); /** * Values for the {@code state} bound property. @@ -755,18 +756,16 @@ public abstract class SwingWorker implements RunnableFuture { } + private static ExecutorService executorService; + /** * returns workersExecutorService. * - * returns the service stored in the appContext or creates it if - * necessary. 
+ * returns the service and creates it if necessary. * * @return ExecutorService for the {@code SwingWorkers} */ private static synchronized ExecutorService getWorkersExecutorService() { - final AppContext appContext = AppContext.getAppContext(); - ExecutorService executorService = - (ExecutorService) appContext.get(SwingWorker.class); if (executorService == null) { //this creates daemon threads. ThreadFactory threadFactory = @@ -788,46 +787,26 @@ public abstract class SwingWorker implements RunnableFuture { 10L, TimeUnit.MINUTES, new LinkedBlockingQueue(), threadFactory); - appContext.put(SwingWorker.class, executorService); - // Don't use ShutdownHook here as it's not enough. We should track - // AppContext disposal instead of JVM shutdown, see 6799345 for details - final ExecutorService es = executorService; - appContext.addPropertyChangeListener(AppContext.DISPOSED_PROPERTY_NAME, - new PropertyChangeListener() { - @Override - public void propertyChange(PropertyChangeEvent pce) { - boolean disposed = (Boolean)pce.getNewValue(); - if (disposed) { - final WeakReference executorServiceRef = - new WeakReference(es); - final ExecutorService executorService = - executorServiceRef.get(); - if (executorService != null) { - executorService.shutdown(); - } - } - } + final Runnable shutdownHook = new Runnable() { + final WeakReference executorServiceRef = + new WeakReference(executorService); + public void run() { + final ExecutorService executorService = executorServiceRef.get(); + if (executorService != null) { + executorService.shutdown(); + } } - ); + }; + ThreadGroup rootTG = ThreadGroupUtils.getRootThreadGroup(); + Thread t = new Thread(rootTG, shutdownHook, + "SwingWorker ES", 0, false); + t.setContextClassLoader(null); + Runtime.getRuntime().addShutdownHook(t); } return executorService; } - private static final Object DO_SUBMIT_KEY = new StringBuilder("doSubmit"); - private static AccumulativeRunnable getDoSubmit() { - synchronized (DO_SUBMIT_KEY) { - final 
AppContext appContext = AppContext.getAppContext(); - Object doSubmit = appContext.get(DO_SUBMIT_KEY); - if (doSubmit == null) { - doSubmit = new DoSubmitAccumulativeRunnable(); - appContext.put(DO_SUBMIT_KEY, doSubmit); - } - @SuppressWarnings("unchecked") - AccumulativeRunnable tmp = (AccumulativeRunnable) doSubmit; - return tmp; - } - } private static class DoSubmitAccumulativeRunnable extends AccumulativeRunnable implements ActionListener { private static final int DELAY = 1000 / 30; diff --git a/src/java.desktop/share/classes/javax/swing/Timer.java b/src/java.desktop/share/classes/javax/swing/Timer.java index 2cb8381d7d3..1063532715c 100644 --- a/src/java.desktop/share/classes/javax/swing/Timer.java +++ b/src/java.desktop/share/classes/javax/swing/Timer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -173,8 +173,7 @@ public class Timer implements Serializable private final transient Lock lock = new ReentrantLock(); // This field is maintained by TimerQueue. - // eventQueued can also be reset by the TimerQueue, but will only ever - // happen in an AppContext case when TimerQueues thread is destroyed. + // eventQueued can also be reset by the TimerQueue // access to this field is synchronized on getLock() lock. transient TimerQueue.DelayedTimer delayedTimer = null; diff --git a/src/java.desktop/share/classes/javax/swing/TimerQueue.java b/src/java.desktop/share/classes/javax/swing/TimerQueue.java index 4ef2769652a..39c06e57479 100644 --- a/src/java.desktop/share/classes/javax/swing/TimerQueue.java +++ b/src/java.desktop/share/classes/javax/swing/TimerQueue.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ package javax.swing; import java.util.concurrent.*; import java.util.concurrent.locks.*; import java.util.concurrent.atomic.AtomicLong; -import sun.awt.AppContext; +import sun.awt.util.ThreadGroupUtils; /** * Internal class to manage all Timers using one thread. @@ -40,8 +40,7 @@ import sun.awt.AppContext; */ class TimerQueue implements Runnable { - private static final Object sharedInstanceKey = - new StringBuffer("TimerQueue.sharedInstanceKey"); + private static volatile TimerQueue sharedInstance; private final DelayQueue queue; private volatile boolean running; @@ -69,14 +68,10 @@ class TimerQueue implements Runnable public static TimerQueue sharedInstance() { synchronized (classLock) { - TimerQueue sharedInst = (TimerQueue) - SwingUtilities.appContextGet( - sharedInstanceKey); - if (sharedInst == null) { - sharedInst = new TimerQueue(); - SwingUtilities.appContextPut(sharedInstanceKey, sharedInst); + if (sharedInstance == null) { + sharedInstance = new TimerQueue(); } - return sharedInst; + return sharedInstance; } } @@ -88,9 +83,10 @@ class TimerQueue implements Runnable return; } try { - final ThreadGroup threadGroup = AppContext.getAppContext().getThreadGroup(); + final ThreadGroup threadGroup = ThreadGroupUtils.getRootThreadGroup(); String name = "TimerQueue"; Thread timerThread = new Thread(threadGroup, this, name, 0, false); + timerThread.setContextClassLoader(null); timerThread.setDaemon(true); timerThread.setPriority(Thread.NORM_PRIORITY); timerThread.start(); @@ -183,11 +179,6 @@ class TimerQueue implements Runnable timer.getLock().unlock(); } } catch (InterruptedException ie) { - // Shouldn't ignore InterruptedExceptions here, so AppContext - // is disposed gracefully, see 6799345 for details - if 
(AppContext.getAppContext().isDisposed()) { - break; - } } } } finally { diff --git a/src/java.desktop/share/classes/javax/swing/TransferHandler.java b/src/java.desktop/share/classes/javax/swing/TransferHandler.java index f412314b28f..0f2229482bc 100644 --- a/src/java.desktop/share/classes/javax/swing/TransferHandler.java +++ b/src/java.desktop/share/classes/javax/swing/TransferHandler.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,6 @@ import javax.swing.text.JTextComponent; import sun.reflect.misc.MethodUtil; import sun.swing.SwingUtilities2; -import sun.awt.AppContext; import sun.swing.*; import sun.awt.SunToolkit; @@ -1096,15 +1095,12 @@ public class TransferHandler implements Serializable { private String propertyName; private static SwingDragGestureRecognizer recognizer = null; + private static DropHandler handler; private static DropTargetListener getDropTargetListener() { synchronized(DropHandler.class) { - DropHandler handler = - (DropHandler)AppContext.getAppContext().get(DropHandler.class); - if (handler == null) { handler = new DropHandler(); - AppContext.getAppContext().put(DropHandler.class, handler); } return handler; @@ -1725,29 +1721,22 @@ public class TransferHandler implements Serializable { } } + private static Clipboard clipboard; /** * Returns the clipboard to use for cut/copy/paste. */ private Clipboard getClipboard(JComponent c) { - if (SwingUtilities2.canAccessSystemClipboard()) { + if (!GraphicsEnvironment.isHeadless()) { return c.getToolkit().getSystemClipboard(); } - Clipboard clipboard = (Clipboard)sun.awt.AppContext.getAppContext(). 
- get(SandboxClipboardKey); - if (clipboard == null) { - clipboard = new Clipboard("Sandboxed Component Clipboard"); - sun.awt.AppContext.getAppContext().put(SandboxClipboardKey, - clipboard); + // Likely it is impossible to be here in headless. + synchronized (TransferHandler.class) { + if (clipboard == null) { + clipboard = new Clipboard("Headless clipboard"); + } + return clipboard; } - return clipboard; } - - /** - * Key used in app context to lookup Clipboard to use if access to - * System clipboard is denied. - */ - private static Object SandboxClipboardKey = new Object(); - } } diff --git a/src/java.desktop/share/classes/javax/swing/UIDefaults.java b/src/java.desktop/share/classes/javax/swing/UIDefaults.java index d59a46dba32..1d6b7257273 100644 --- a/src/java.desktop/share/classes/javax/swing/UIDefaults.java +++ b/src/java.desktop/share/classes/javax/swing/UIDefaults.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1169,9 +1169,9 @@ public class UIDefaults extends Hashtable */ private Class[] getClassArray(Object[] args) { Class[] types = null; - if (args!=null) { + if (args != null) { types = new Class[args.length]; - for (int i = 0; i< args.length; i++) { + for (int i = 0; i < args.length; i++) { /* PENDING(ges): At present only the primitive types used are handled correctly; this should eventually handle all primitive types */ @@ -1199,7 +1199,7 @@ public class UIDefaults extends Hashtable private String printArgs(Object[] array) { String s = "{"; - if (array !=null) { + if (array != null) { for (int i = 0 ; i < array.length-1; i++) { s = s.concat(array[i] + ","); } diff --git a/src/java.desktop/share/classes/javax/swing/UIManager.java b/src/java.desktop/share/classes/javax/swing/UIManager.java index 69063c562e6..f323842ae49 100644 --- a/src/java.desktop/share/classes/javax/swing/UIManager.java +++ b/src/java.desktop/share/classes/javax/swing/UIManager.java @@ -56,7 +56,6 @@ import sun.awt.OSInfo; import sun.swing.SwingUtilities2; import java.util.HashMap; import java.util.Objects; -import sun.awt.AppContext; import sun.awt.AWTAccessor; import sun.swing.SwingAccessor; @@ -179,10 +178,7 @@ public class UIManager implements Serializable /** * This class defines the state managed by the UIManager. For * Swing applications the fields in this class could just as well - * be static members of UIManager however we give them - * "AppContext" - * scope instead so that potentially multiple lightweight - * applications running in a single VM have their own state. + * be static members of UIManager. */ private static class LAFState { @@ -206,8 +202,8 @@ public class UIManager implements Serializable void setSystemDefaults(UIDefaults x) { tables[1] = x; } /** - * Returns the SwingPropertyChangeSupport for the current - * AppContext. 
If create is a true, a non-null + * Returns the SwingPropertyChangeSupport instance. + * If create is a true, a non-null * SwingPropertyChangeSupport will be returned, if * create is false and this has not been invoked * with true, null will be returned. @@ -1366,18 +1362,7 @@ public class UIManager implements Serializable return; } - // Try to get default LAF from system property, then from AppContext - // (6653395), then use cross-platform one by default. - String lafName = null; - @SuppressWarnings("unchecked") - HashMap lafData = - (HashMap) AppContext.getAppContext().remove("swing.lafdata"); - if (lafData != null) { - lafName = lafData.remove("defaultlaf"); - } - if (lafName == null) { - lafName = getCrossPlatformLookAndFeelClassName(); - } + String lafName = getCrossPlatformLookAndFeelClassName(); lafName = swingProps.getProperty(defaultLAFKey, lafName); try { @@ -1385,13 +1370,6 @@ public class UIManager implements Serializable } catch (Exception e) { throw new Error("Cannot load " + lafName); } - - // Set any properties passed through AppContext (6653395). - if (lafData != null) { - for (Object key: lafData.keySet()) { - UIManager.put(key, lafData.get(key)); - } - } } @@ -1451,8 +1429,8 @@ public class UIManager implements Serializable /* * This method is called before any code that depends on the - * AppContext specific LAFState object runs. - * In some AppContext cases, it's possible for this method + * LAFState object runs. + * In some cases, it's possible for this method * to be re-entered, which is why we grab a lock before calling * initialize(). 
*/ diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicComboPopup.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicComboPopup.java index 38798d4c94f..d825dd73e9c 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicComboPopup.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicComboPopup.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -243,8 +243,8 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { public void hide() { MenuSelectionManager manager = MenuSelectionManager.defaultManager(); MenuElement [] selection = manager.getSelectedPath(); - for ( int i = 0 ; i < selection.length ; i++ ) { - if ( selection[i] == this ) { + for (int i = 0 ; i < selection.length; i++ ) { + if (selection[i] == this) { manager.clearSelectedPath(); break; } @@ -924,7 +924,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { if (!SwingUtilities.isLeftMouseButton(e) || !comboBox.isEnabled() || !comboBox.isShowing()) return; - if ( comboBox.isEditable() ) { + if (comboBox.isEditable()) { Component comp = comboBox.getEditor().getEditorComponent(); if ((!(comp instanceof JComponent)) || ((JComponent)comp).isRequestFocusEnabled()) { comp.requestFocus(FocusEvent.Cause.MOUSE_EVENT); @@ -957,12 +957,12 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { Component source = (Component)e.getSource(); Dimension size = source.getSize(); Rectangle bounds = new Rectangle( 0, 0, size.width, size.height); - if ( !bounds.contains( e.getPoint() ) ) { + if (!bounds.contains(e.getPoint())) { MouseEvent newEvent = convertMouseEvent( e ); Point location = newEvent.getPoint(); Rectangle r = new 
Rectangle(); list.computeVisibleRect( r ); - if ( r.contains( location ) ) { + if (r.contains(location)) { if (comboBox.getSelectedIndex() == list.getSelectedIndex()) { comboBox.getEditor().setItem(list.getSelectedValue()); } @@ -989,7 +989,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { Point location = anEvent.getPoint(); Rectangle r = new Rectangle(); list.computeVisibleRect( r ); - if ( r.contains( location ) ) { + if (r.contains(location)) { updateListBoxSelectionForEvent( anEvent, false ); } } @@ -999,34 +999,34 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { if (e.getSource() == list) { return; } - if ( isVisible() ) { + if (isVisible()) { MouseEvent newEvent = convertMouseEvent( e ); Rectangle r = new Rectangle(); list.computeVisibleRect( r ); - if ( newEvent.getPoint().y >= r.y && newEvent.getPoint().y <= r.y + r.height - 1 ) { + if (newEvent.getPoint().y >= r.y && newEvent.getPoint().y <= r.y + r.height - 1) { hasEntered = true; - if ( isAutoScrolling ) { + if (isAutoScrolling) { stopAutoScrolling(); } Point location = newEvent.getPoint(); - if ( r.contains( location ) ) { + if (r.contains(location)) { updateListBoxSelectionForEvent( newEvent, false ); } } else { - if ( hasEntered ) { + if (hasEntered) { int directionToScroll = newEvent.getPoint().y < r.y ? 
SCROLL_UP : SCROLL_DOWN; - if ( isAutoScrolling && scrollDirection != directionToScroll ) { + if (isAutoScrolling && scrollDirection != directionToScroll) { stopAutoScrolling(); startAutoScrolling( directionToScroll ); } - else if ( !isAutoScrolling ) { + else if (!isAutoScrolling) { startAutoScrolling( directionToScroll ); } } else { - if ( e.getPoint().y < 0 ) { + if (e.getPoint().y < 0) { hasEntered = true; startAutoScrolling( SCROLL_UP ); } @@ -1043,7 +1043,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { JComboBox comboBox = (JComboBox)e.getSource(); String propertyName = e.getPropertyName(); - if ( propertyName == "model" ) { + if (propertyName == "model") { @SuppressWarnings("unchecked") ComboBoxModel oldModel = (ComboBoxModel)e.getOldValue(); @SuppressWarnings("unchecked") @@ -1053,13 +1053,13 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { list.setModel(newModel); - if ( isVisible() ) { + if (isVisible()) { hide(); } } - else if ( propertyName == "renderer" ) { + else if (propertyName == "renderer") { list.setCellRenderer( comboBox.getRenderer() ); - if ( isVisible() ) { + if (isVisible()) { hide(); } } @@ -1067,18 +1067,18 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { // Pass along the new component orientation // to the list and the scroller - ComponentOrientation o =(ComponentOrientation)e.getNewValue(); + ComponentOrientation o = (ComponentOrientation)e.getNewValue(); JList list = getList(); - if (list!=null && list.getComponentOrientation()!=o) { + if (list != null && list.getComponentOrientation() != o) { list.setComponentOrientation(o); } - if (scroller!=null && scroller.getComponentOrientation()!=o) { + if (scroller != null && scroller.getComponentOrientation() != o) { scroller.setComponentOrientation(o); } - if (o!=getComponentOrientation()) { + if (o != getComponentOrientation()) { setComponentOrientation(o); } } @@ -1134,13 +1134,13 @@ public class 
BasicComboPopup extends JPopupMenu implements ComboPopup { protected void startAutoScrolling( int direction ) { // XXX - should be a private method within InvocationMouseMotionHandler // if possible. - if ( isAutoScrolling ) { + if (isAutoScrolling) { autoscrollTimer.stop(); } isAutoScrolling = true; - if ( direction == SCROLL_UP ) { + if (direction == SCROLL_UP) { scrollDirection = SCROLL_UP; Point convertedPoint = SwingUtilities.convertPoint( scroller, new Point( 1, 1 ), list ); int top = list.locationToIndex( convertedPoint ); @@ -1149,7 +1149,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { autoscrollTimer = new Timer( 100, new AutoScrollActionHandler( SCROLL_UP) ); } - else if ( direction == SCROLL_DOWN ) { + else if (direction == SCROLL_DOWN) { scrollDirection = SCROLL_DOWN; Dimension size = scroller.getSize(); Point convertedPoint = SwingUtilities.convertPoint( scroller, @@ -1171,7 +1171,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { protected void stopAutoScrolling() { isAutoScrolling = false; - if ( autoscrollTimer != null ) { + if (autoscrollTimer != null) { autoscrollTimer.stop(); autoscrollTimer = null; } @@ -1183,7 +1183,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { */ protected void autoScrollUp() { int index = list.getSelectedIndex(); - if ( index > 0 ) { + if (index > 0) { list.setSelectedIndex( index - 1 ); list.ensureIndexIsVisible( index - 1 ); } @@ -1196,7 +1196,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { protected void autoScrollDown() { int index = list.getSelectedIndex(); int lastItem = list.getModel().getSize() - 1; - if ( index < lastItem ) { + if (index < lastItem) { list.setSelectedIndex( index + 1 ); list.ensureIndexIsVisible( index + 1 ); } @@ -1234,7 +1234,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { * @param e a mouse event */ protected void delegateFocus( MouseEvent e ) { - if ( 
comboBox.isEditable() ) { + if (comboBox.isEditable()) { Component comp = comboBox.getEditor().getEditorComponent(); if ((!(comp instanceof JComponent)) || ((JComponent)comp).isRequestFocusEnabled()) { if (e != null) { @@ -1258,7 +1258,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { * visible. */ protected void togglePopup() { - if ( isVisible() ) { + if (isVisible()) { hide(); } else { @@ -1274,7 +1274,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { * @param selectedIndex the index to set the list */ private void setListSelection(int selectedIndex) { - if ( selectedIndex == -1 ) { + if (selectedIndex == -1) { list.clearSelection(); } else { @@ -1325,7 +1325,7 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { ListCellRenderer renderer = list.getCellRenderer(); Object value = null; - for ( int i = 0; i < minRowCount; ++i ) { + for (int i = 0; i < minRowCount; ++i) { value = list.getModel().getElementAt( i ); Component c = renderer.getListCellRendererComponent( list, value, i, false, false ); height += c.getPreferredSize().height; @@ -1439,18 +1439,18 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup { // XXX - only seems to be called from this class. 
shouldScroll flag is // never true Point location = anEvent.getPoint(); - if ( list == null ) + if (list == null) return; int index = list.locationToIndex(location); - if ( index == -1 ) { - if ( location.y < 0 ) + if (index == -1) { + if (location.y < 0) index = 0; else index = comboBox.getModel().getSize() - 1; } - if ( list.getSelectedIndex() != index ) { + if (list.getSelectedIndex() != index) { list.setSelectedIndex(index); - if ( shouldScroll ) + if (shouldScroll) list.ensureIndexIsVisible(index); } } diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicListUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicListUI.java index 0a4aa03dce9..37bcbec2156 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicListUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicListUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -876,7 +876,7 @@ public class BasicListUI extends ListUI } Long l = (Long)UIManager.get("List.timeFactor"); - timeFactor = (l!=null) ? l.longValue() : 1000L; + timeFactor = (l != null) ? l.longValue() : 1000L; updateIsFileList(); } diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicMenuBarUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicMenuBarUI.java index 2079aafd3a2..a9abaee0129 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicMenuBarUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicMenuBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -125,7 +125,7 @@ public class BasicMenuBarUI extends MenuBarUI { for (int i = 0; i < menuBar.getMenuCount(); i++) { JMenu menu = menuBar.getMenu(i); - if (menu!=null) + if (menu != null) menu.getModel().addChangeListener(changeListener); } menuBar.addContainerListener(containerListener); @@ -167,7 +167,7 @@ public class BasicMenuBarUI extends MenuBarUI { * Uninstalls default properties. */ protected void uninstallDefaults() { - if (menuBar!=null) { + if (menuBar != null) { LookAndFeel.uninstallBorder(menuBar); } } @@ -180,7 +180,7 @@ public class BasicMenuBarUI extends MenuBarUI { for (int i = 0; i < menuBar.getMenuCount(); i++) { JMenu menu = menuBar.getMenu(i); - if (menu !=null) + if (menu != null) menu.getModel().removeChangeListener(changeListener); } @@ -237,10 +237,10 @@ public class BasicMenuBarUI extends MenuBarUI { // ChangeListener // public void stateChanged(ChangeEvent e) { - int i,c; - for(i=0,c = menuBar.getMenuCount() ; i < c ; i++) { + final int c = menuBar.getMenuCount(); + for (int i = 0; i < c; i++) { JMenu menu = menuBar.getMenu(i); - if(menu !=null && menu.isSelected()) { + if (menu != null && menu.isSelected()) { menuBar.getSelectionModel().setSelectedIndex(i); break; } @@ -277,7 +277,7 @@ public class BasicMenuBarUI extends MenuBarUI { MenuElement[] me; MenuElement[] subElements; JMenu menu = menuBar.getMenu(0); - if (menu!=null) { + if (menu != null) { me = new MenuElement[3]; me[0] = (MenuElement) menuBar; me[1] = (MenuElement) menu; diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicPopupMenuUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicPopupMenuUI.java index 8df27d3e3cb..a3ffb034b4b 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicPopupMenuUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicPopupMenuUI.java @@ 
-908,7 +908,7 @@ public class BasicPopupMenuUI extends PopupMenuUI { } boolean isInPopup(Component src) { - for (Component c=src; c!=null; c=c.getParent()) { + for (Component c = src; c != null; c = c.getParent()) { if (c instanceof Window) { break; } else if (c instanceof JPopupMenu) { diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSpinnerUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSpinnerUI.java index 2b871343697..b523f8c7bd3 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSpinnerUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSpinnerUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -688,7 +688,7 @@ public class BasicSpinnerUI extends SpinnerUI arrowButton = (JButton)e.getSource(); } } else { - if (arrowButton!=null && !arrowButton.getModel().isPressed() + if (arrowButton != null && !arrowButton.getModel().isPressed() && autoRepeatTimer.isRunning()) { autoRepeatTimer.stop(); spinner = null; diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSplitPaneUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSplitPaneUI.java index 270181f4600..1d8c471e9ec 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSplitPaneUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicSplitPaneUI.java @@ -2273,7 +2273,7 @@ public class BasicSplitPaneUI extends SplitPaneUI JSplitPane parentSplitPane = (JSplitPane)SwingUtilities.getAncestorOfClass( JSplitPane.class, splitPane); - if (parentSplitPane!=null) { + if (parentSplitPane != null) { parentSplitPane.requestFocus(); } } @@ -2307,7 +2307,7 @@ public class BasicSplitPaneUI extends 
SplitPaneUI } while (splitPane.isAncestorOf(focusOn) && !focusFrom.contains(focusOn)); } - if ( focusOn!=null && !splitPane.isAncestorOf(focusOn) ) { + if ( focusOn != null && !splitPane.isAncestorOf(focusOn) ) { focusOn.requestFocus(); } } @@ -2323,7 +2323,7 @@ public class BasicSplitPaneUI extends SplitPaneUI if (focusOn != null) { // don't change the focus if the new focused component belongs // to the same splitpane and the same side - if ( focus!=null && + if ( focus != null && ( (SwingUtilities.isDescendingFrom(focus, left) && SwingUtilities.isDescendingFrom(focusOn, left)) || (SwingUtilities.isDescendingFrom(focus, right) && @@ -2338,15 +2338,15 @@ public class BasicSplitPaneUI extends SplitPaneUI Component left = splitPane.getLeftComponent(); Component right = splitPane.getRightComponent(); Component next; - if (focus!=null && SwingUtilities.isDescendingFrom(focus, left) && - right!=null) { + if (focus != null && SwingUtilities.isDescendingFrom(focus, left) && + right != null) { next = getFirstAvailableComponent(right); if (next != null) { return next; } } JSplitPane parentSplitPane = (JSplitPane)SwingUtilities.getAncestorOfClass(JSplitPane.class, splitPane); - if (parentSplitPane!=null) { + if (parentSplitPane != null) { // focus next side of the parent split pane next = getNextSide(parentSplitPane, focus); } else { diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTabbedPaneUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTabbedPaneUI.java index 842e8892c76..de23dbab29a 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTabbedPaneUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTabbedPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -504,7 +504,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { tabPane.addFocusListener(focusListener); } tabPane.addContainerListener(getHandler()); - if (tabPane.getTabCount()>0) { + if (tabPane.getTabCount() > 0) { Boolean htmlDisabled = (Boolean) tabPane.getClientProperty("html.disable"); if (!(Boolean.TRUE.equals(htmlDisabled))) { @@ -528,7 +528,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { } tabPane.removeContainerListener(getHandler()); - if (htmlViews!=null) { + if (htmlViews != null) { htmlViews.removeAllElements(); htmlViews = null; } @@ -949,8 +949,8 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { // Paint tabRuns of tabs from back to front for (int i = runCount - 1; i >= 0; i--) { int start = tabRuns[i]; - int next = tabRuns[(i == runCount - 1)? 0 : i + 1]; - int end = (next != 0? next - 1: tabCount - 1); + int next = tabRuns[(i == runCount - 1) ? 0 : i + 1]; + int end = (next != 0 ? 
next - 1 : tabCount - 1); for (int j = start; j <= end; j++) { if (j != selectedIndex && rects[j].intersects(clipRect)) { paintTab(g, tabPlacement, rects, j, iconRect, textRect); @@ -1118,7 +1118,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { int xx = x; g.setColor(shadow); while(xx <= x+rects[tabIndex].width) { - for (int i=0; i < xCropLen.length; i+=2) { + for (int i = 0; i < xCropLen.length; i += 2) { g.drawLine(xx+yCropLen[i],y-xCropLen[i], xx+yCropLen[i+1]-1,y-xCropLen[i+1]); } @@ -1133,7 +1133,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { int yy = y; g.setColor(shadow); while(yy <= y+rects[tabIndex].height) { - for (int i=0; i < xCropLen.length; i+=2) { + for (int i = 0; i < xCropLen.length; i += 2) { g.drawLine(x-xCropLen[i],yy+yCropLen[i], x-xCropLen[i+1],yy+yCropLen[i+1]-1); } @@ -1549,7 +1549,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { protected void paintContentBorderTopEdge(Graphics g, int tabPlacement, int selectedIndex, int x, int y, int w, int h) { - Rectangle selRect = selectedIndex < 0? null : + Rectangle selRect = selectedIndex < 0 ? null : getTabBounds(selectedIndex, calcRect); g.setColor(lightHighlight); @@ -1588,7 +1588,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { protected void paintContentBorderLeftEdge(Graphics g, int tabPlacement, int selectedIndex, int x, int y, int w, int h) { - Rectangle selRect = selectedIndex < 0? null : + Rectangle selRect = selectedIndex < 0 ? null : getTabBounds(selectedIndex, calcRect); g.setColor(lightHighlight); @@ -1624,7 +1624,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { protected void paintContentBorderBottomEdge(Graphics g, int tabPlacement, int selectedIndex, int x, int y, int w, int h) { - Rectangle selRect = selectedIndex < 0? null : + Rectangle selRect = selectedIndex < 0 ? 
null : getTabBounds(selectedIndex, calcRect); g.setColor(shadow); @@ -1667,7 +1667,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { protected void paintContentBorderRightEdge(Graphics g, int tabPlacement, int selectedIndex, int x, int y, int w, int h) { - Rectangle selRect = selectedIndex < 0? null : + Rectangle selRect = selectedIndex < 0 ? null : getTabBounds(selectedIndex, calcRect); g.setColor(shadow); @@ -4090,7 +4090,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { setHtmlView(v, inserted, index); } } else { // Not HTML - if (htmlViews!=null) { // Add placeholder + if (htmlViews != null) { // Add placeholder setHtmlView(null, inserted, index); } // else nada! } @@ -4336,8 +4336,8 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants { private Vector createHTMLVector() { Vector htmlViews = new Vector(); int count = tabPane.getTabCount(); - if (count>0) { - for (int i=0 ; i 0) { + for (int i = 0 ; i < count; i++) { String title = tabPane.getTitleAt(i); if (BasicHTML.isHTMLString(title)) { htmlViews.addElement(BasicHTML.createHTMLView(tabPane, title)); diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicToolBarUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicToolBarUI.java index 41ab6137976..9c07b6a03d1 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicToolBarUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicToolBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -980,7 +980,7 @@ public class BasicToolBarUI extends ToolBarUI implements SwingConstants { toolBar.setOrientation( orientation ); - if (dragWindow !=null) + if (dragWindow != null) dragWindow.setOrientation(orientation); } @@ -1616,7 +1616,7 @@ public class BasicToolBarUI extends ToolBarUI implements SwingConstants this.orientation = o; Dimension size = getSize(); setSize(new Dimension(size.height, size.width)); - if (offset!=null) { + if (offset != null) { if( BasicGraphicsUtils.isLeftToRight(toolBar) ) { setOffset(new Point(offset.y, offset.x)); } else if( o == JToolBar.HORIZONTAL ) { diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTreeUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTreeUI.java index 28beaee6929..19a25005be9 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTreeUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTreeUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -943,7 +943,7 @@ public class BasicTreeUI extends TreeUI lineTypeDashed = UIManager.getBoolean("Tree.lineTypeDashed"); Long l = (Long)UIManager.get("Tree.timeFactor"); - timeFactor = (l!=null) ? l.longValue() : 1000L; + timeFactor = (l != null) ? 
l.longValue() : 1000L; Object showsRootHandles = UIManager.get("Tree.showsRootHandles"); if (showsRootHandles != null) { diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/DragRecognitionSupport.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/DragRecognitionSupport.java index abdd460e44b..517dad35d45 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/DragRecognitionSupport.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/DragRecognitionSupport.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,14 +29,12 @@ import java.awt.event.*; import java.awt.dnd.DragSource; import javax.swing.*; import sun.awt.dnd.SunDragSourceContextPeer; -import sun.awt.AppContext; /** * Drag gesture recognition support for classes that have a * TransferHandler. The gesture for a drag in this class is a mouse * press followed by movement by DragSource.getDragThreshold() - * pixels. An instance of this class is maintained per AppContext, and the - * public static methods call into the appropriate instance. + * pixels. * * @author Shannon Hickey */ @@ -53,19 +51,14 @@ class DragRecognitionSupport { public void dragStarting(MouseEvent me); } + private static DragRecognitionSupport support; /** - * Returns the DragRecognitionSupport for the caller's AppContext. + * Returns the DragRecognitionSupport instance. */ - private static DragRecognitionSupport getDragRecognitionSupport() { - DragRecognitionSupport support = - (DragRecognitionSupport)AppContext.getAppContext(). 
- get(DragRecognitionSupport.class); - + private static synchronized DragRecognitionSupport getDragRecognitionSupport() { if (support == null) { support = new DragRecognitionSupport(); - AppContext.getAppContext().put(DragRecognitionSupport.class, support); } - return support; } diff --git a/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java b/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java index 7c56c681423..461c597cc5f 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java @@ -706,6 +706,11 @@ public class MetalLookAndFeel extends BasicLookAndFeel // DEFAULTS TABLE // + Object commonInputMap = new UIDefaults.LazyInputMap(new Object[] { + "SPACE", "pressed", + "released SPACE", "released" + }); + Object[] defaults = { // *** Auditory Feedback "AuditoryCues.defaultCueList", defaultCueList, @@ -791,6 +796,8 @@ public class MetalLookAndFeel extends BasicLookAndFeel }), + + // Buttons "Button.defaultButtonFollowsFocus", Boolean.FALSE, "Button.disabledText", inactiveControlTextColor, @@ -798,10 +805,8 @@ public class MetalLookAndFeel extends BasicLookAndFeel "Button.border", buttonBorder, "Button.font", controlTextValue, "Button.focus", focusColor, - "Button.focusInputMap", new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "Button.focusInputMap", commonInputMap, + // Button default margin is (2, 14, 2, 14), defined in // BasicLookAndFeel via "Button.margin" UI property. 
@@ -810,11 +815,8 @@ public class MetalLookAndFeel extends BasicLookAndFeel "CheckBox.font", controlTextValue, "CheckBox.focus", focusColor, "CheckBox.icon",(LazyValue) t -> MetalIconFactory.getCheckBoxIcon(), - "CheckBox.focusInputMap", - new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "CheckBox.focusInputMap", commonInputMap, + // margin is 2 all the way around, BasicBorders.RadioButtonBorder // (checkbox uses RadioButtonBorder) is 2 all the way around too. "CheckBox.totalInsets", new Insets(4, 4, 4, 4), @@ -824,11 +826,7 @@ public class MetalLookAndFeel extends BasicLookAndFeel "RadioButton.icon",(LazyValue) t -> MetalIconFactory.getRadioButtonIcon(), "RadioButton.font", controlTextValue, "RadioButton.focus", focusColor, - "RadioButton.focusInputMap", - new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "RadioButton.focusInputMap", commonInputMap, // margin is 2 all the way around, BasicBorders.RadioButtonBorder // is 2 all the way around too. "RadioButton.totalInsets", new Insets(4, 4, 4, 4), @@ -838,11 +836,7 @@ public class MetalLookAndFeel extends BasicLookAndFeel "ToggleButton.focus", focusColor, "ToggleButton.border", toggleButtonBorder, "ToggleButton.font", controlTextValue, - "ToggleButton.focusInputMap", - new UIDefaults.LazyInputMap(new Object[] { - "SPACE", "pressed", - "released SPACE", "released" - }), + "ToggleButton.focusInputMap", commonInputMap, // File View diff --git a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/AbstractRegionPainter.java b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/AbstractRegionPainter.java index 1bdff19fa2d..d06406d69d6 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/AbstractRegionPainter.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/AbstractRegionPainter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -660,7 +660,7 @@ public abstract class AbstractRegionPainter implements Painter { ImageScalingHelper.paint(g, 0, 0, w, h, img, insets, dstInsets, ImageScalingHelper.PaintType.PAINT9_STRETCH, ImageScalingHelper.PAINT_ALL); g.setRenderingHint(RenderingHints.KEY_INTERPOLATION, - oldScalingHints!=null?oldScalingHints:RenderingHints.VALUE_INTERPOLATION_NEAREST_NEIGHBOR); + oldScalingHints != null ? oldScalingHints:RenderingHints.VALUE_INTERPOLATION_NEAREST_NEIGHBOR); } else { // render directly paint0(g, c, w, h, extendedCacheKeys); diff --git a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/NimbusLookAndFeel.java b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/NimbusLookAndFeel.java index c9340a62368..7ef7beb5d1c 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/NimbusLookAndFeel.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/NimbusLookAndFeel.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -531,7 +531,7 @@ public class NimbusLookAndFeel extends SynthLookAndFeel { public Object createValue(UIDefaults table) { Object obj = null; // check specified state - if (state!=null){ + if (state != null){ obj = uiDefaults.get(prefix+"["+state+"]."+suffix); } // check enabled state diff --git a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/SynthPainterImpl.java b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/SynthPainterImpl.java index ca19a74b6ac..fd761ac4730 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/SynthPainterImpl.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/SynthPainterImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,13 +63,13 @@ class SynthPainterImpl extends SynthPainter { if (p != null) { if (g instanceof Graphics2D){ Graphics2D gfx = (Graphics2D)g; - if (transform!=null){ + if (transform != null) { gfx.transform(transform); } gfx.translate(x, y); p.paint(gfx, ctx.getComponent(), w, h); gfx.translate(-x, -y); - if (transform!=null){ + if (transform != null){ try { gfx.transform(transform.createInverse()); } catch (NoninvertibleTransformException e) { @@ -85,7 +85,7 @@ class SynthPainterImpl extends SynthPainter { BufferedImage img = new BufferedImage(w,h, BufferedImage.TYPE_INT_ARGB); Graphics2D gfx = img.createGraphics(); - if (transform!=null){ + if (transform != null){ gfx.transform(transform); } p.paint(gfx, ctx.getComponent(), w, h); diff --git a/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthComboBoxUI.java b/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthComboBoxUI.java index 77ca2a1fc05..0c373483153 100644 
--- a/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthComboBoxUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthComboBoxUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -814,7 +814,7 @@ public class SynthComboBoxUI extends BasicComboBoxUI implements public void propertyChange(PropertyChangeEvent evt) { ComboBoxEditor newEditor = comboBox.getEditor(); if (editor != newEditor){ - if (editorComponent!=null){ + if (editorComponent != null) { editorComponent.removeFocusListener(this); } editor = newEditor; diff --git a/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthScrollPaneUI.java b/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthScrollPaneUI.java index 96541d51f67..c07feb8b56d 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthScrollPaneUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthScrollPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -225,7 +225,7 @@ public class SynthScrollPaneUI extends BasicScrollPaneUI private int getComponentState(JComponent c) { int baseState = SynthLookAndFeel.getComponentState(c); - if (viewportViewFocusHandler!=null && viewportViewHasFocus){ + if (viewportViewFocusHandler != null && viewportViewHasFocus) { baseState = baseState | FOCUSED; } return baseState; diff --git a/src/java.desktop/share/classes/javax/swing/text/JTextComponent.java b/src/java.desktop/share/classes/javax/swing/text/JTextComponent.java index 59cee1e12ee..f81ba9d66c2 100644 --- a/src/java.desktop/share/classes/javax/swing/text/JTextComponent.java +++ b/src/java.desktop/share/classes/javax/swing/text/JTextComponent.java @@ -1181,7 +1181,7 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A Hashtable h = new Hashtable(); for (Action a : actions) { String value = (String)a.getValue(Action.NAME); - h.put((value!=null ? value:""), a); + h.put((value != null ? value : ""), a); } for (KeyBinding binding : bindings) { Action a = h.get(binding.actionName); diff --git a/src/java.desktop/share/classes/javax/swing/text/TextAction.java b/src/java.desktop/share/classes/javax/swing/text/TextAction.java index 5c11c994ab2..00da7bad93e 100644 --- a/src/java.desktop/share/classes/javax/swing/text/TextAction.java +++ b/src/java.desktop/share/classes/javax/swing/text/TextAction.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,11 +107,11 @@ public abstract class TextAction extends AbstractAction { Hashtable h = new Hashtable(); for (Action a : list1) { String value = (String)a.getValue(Action.NAME); - h.put((value!=null ? 
value:""), a); + h.put((value != null ? value : ""), a); } for (Action a : list2) { String value = (String)a.getValue(Action.NAME); - h.put((value!=null ? value:""), a); + h.put((value != null ? value : ""), a); } Action[] actions = new Action[h.size()]; int index = 0; diff --git a/src/java.desktop/share/classes/javax/swing/tree/DefaultTreeCellEditor.java b/src/java.desktop/share/classes/javax/swing/tree/DefaultTreeCellEditor.java index 4662c95c01f..7595638dc66 100644 --- a/src/java.desktop/share/classes/javax/swing/tree/DefaultTreeCellEditor.java +++ b/src/java.desktop/share/classes/javax/swing/tree/DefaultTreeCellEditor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -259,7 +259,7 @@ public class DefaultTreeCellEditor implements ActionListener, TreeCellEditor, ((MouseEvent)event).getY()); editable = (lastPath != null && path != null && lastPath.equals(path)); - if (path!=null) { + if (path != null) { lastRow = tree.getRowForPath(path); Object value = path.getLastPathComponent(); boolean isSelected = tree.isRowSelected(lastRow); diff --git a/src/java.desktop/share/classes/module-info.java b/src/java.desktop/share/classes/module-info.java index 6d34d934194..57392f71321 100644 --- a/src/java.desktop/share/classes/module-info.java +++ b/src/java.desktop/share/classes/module-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -115,7 +115,6 @@ module java.desktop { // qualified exports may be inserted at build time // see make/GensrcModuleInfo.gmk exports sun.awt to - jdk.accessibility, jdk.unsupported.desktop; exports java.awt.dnd.peer to jdk.unsupported.desktop; diff --git a/src/java.desktop/share/classes/sun/awt/AWTAccessor.java b/src/java.desktop/share/classes/sun/awt/AWTAccessor.java index c74219f7efa..143d7e6198c 100644 --- a/src/java.desktop/share/classes/sun/awt/AWTAccessor.java +++ b/src/java.desktop/share/classes/sun/awt/AWTAccessor.java @@ -361,13 +361,6 @@ public final class AWTAccessor { * Accessor for InputEvent.getButtonDownMasks() */ int[] getButtonDownMasks(); - - /* - * Accessor for InputEvent.canAccessSystemClipboard field - */ - boolean canAccessSystemClipboard(InputEvent event); - void setCanAccessSystemClipboard(InputEvent event, - boolean canAccessSystemClipboard); } /** @@ -762,8 +755,6 @@ public final class AWTAccessor { * An accessor object for the AccessibleContext class */ public interface AccessibleContextAccessor { - void setAppContext(AccessibleContext accessibleContext, AppContext appContext); - AppContext getAppContext(AccessibleContext accessibleContext); Object getNativeAXResource(AccessibleContext accessibleContext); void setNativeAXResource(AccessibleContext accessibleContext, Object value); } diff --git a/src/java.desktop/share/classes/sun/awt/EmbeddedFrame.java b/src/java.desktop/share/classes/sun/awt/EmbeddedFrame.java index ab2ad5dfbf0..fa8ed1e707c 100644 --- a/src/java.desktop/share/classes/sun/awt/EmbeddedFrame.java +++ b/src/java.desktop/share/classes/sun/awt/EmbeddedFrame.java @@ -164,11 +164,8 @@ public abstract class EmbeddedFrame extends Frame } /** - * Because there may be many AppContexts, and we can't be sure where this - * EmbeddedFrame is first created or shown, we can't automatically determine - * the correct KeyboardFocusManager to attach to as 
KeyEventDispatcher. * Those who want to use the functionality of traversing out of the EmbeddedFrame - * must call this method on the AppContext. After that, all the changes + * must call this method. After that, all the changes * can be handled automatically, including possible replacement of * KeyboardFocusManager. */ @@ -184,7 +181,7 @@ public abstract class EmbeddedFrame extends Frame /** * Needed to avoid memory leak: we register this EmbeddedFrame as a listener with - * KeyboardFocusManager of an AppContext. We don't want the KFM to keep + * the KeyboardFocusManager. We don't want the KFM to keep * reference to our EmbeddedFrame forever if the Frame is no longer in use, so we * add listeners in show() and remove them in hide(). */ @@ -198,7 +195,7 @@ public abstract class EmbeddedFrame extends Frame /** * Needed to avoid memory leak: we register this EmbeddedFrame as a listener with - * KeyboardFocusManager of an AppContext. We don't want the KFM to keep + * the KeyboardFocusManager. We don't want the KFM to keep * reference to our EmbeddedFrame forever if the Frame is no longer in use, so we * add listeners in show() and remove them in hide(). */ diff --git a/src/java.desktop/share/classes/sun/awt/GlobalCursorManager.java b/src/java.desktop/share/classes/sun/awt/GlobalCursorManager.java index 27893d0ce87..8638bf81921 100644 --- a/src/java.desktop/share/classes/sun/awt/GlobalCursorManager.java +++ b/src/java.desktop/share/classes/sun/awt/GlobalCursorManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ public abstract class GlobalCursorManager { } } if (shouldPost) { - SunToolkit.postEvent(SunToolkit.targetToAppContext(heavy), in); + SunToolkit.postEvent(in); } } } diff --git a/src/java.desktop/share/classes/sun/awt/KeyboardFocusManagerPeerImpl.java b/src/java.desktop/share/classes/sun/awt/KeyboardFocusManagerPeerImpl.java index ef50d883ee5..909c0b58136 100644 --- a/src/java.desktop/share/classes/sun/awt/KeyboardFocusManagerPeerImpl.java +++ b/src/java.desktop/share/classes/sun/awt/KeyboardFocusManagerPeerImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -127,7 +127,7 @@ public abstract class KeyboardFocusManagerPeerImpl implements KeyboardFocusManag if (focusLog.isLoggable(PlatformLogger.Level.FINER)) { focusLog.finer("Posting focus event: " + fl); } - SunToolkit.postEvent(SunToolkit.targetToAppContext(currentOwner), fl); + SunToolkit.postEvent(fl); } FocusEvent fg = new FocusEvent(lightweightChild, FocusEvent.FOCUS_GAINED, @@ -136,7 +136,7 @@ public abstract class KeyboardFocusManagerPeerImpl implements KeyboardFocusManag if (focusLog.isLoggable(PlatformLogger.Level.FINER)) { focusLog.finer("Posting focus event: " + fg); } - SunToolkit.postEvent(SunToolkit.targetToAppContext(lightweightChild), fg); + SunToolkit.postEvent(fg); return true; } diff --git a/src/java.desktop/share/classes/sun/awt/PaintEventDispatcher.java b/src/java.desktop/share/classes/sun/awt/PaintEventDispatcher.java index eec1fc93c66..831c67e1e4f 100644 --- a/src/java.desktop/share/classes/sun/awt/PaintEventDispatcher.java +++ b/src/java.desktop/share/classes/sun/awt/PaintEventDispatcher.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 
2005, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,7 +93,7 @@ public class PaintEventDispatcher { * This method is invoked from the toolkit thread when the surface * data of the component needs to be replaced. The method run() of * the Runnable argument performs surface data replacing, run() - * should be invoked on the EDT of this component's AppContext. + * should be invoked on the EDT. * Returns true if the Runnable has been enqueued to be invoked * on the EDT. * (Fix 6255371.) diff --git a/src/java.desktop/share/classes/sun/awt/datatransfer/DesktopDatatransferServiceImpl.java b/src/java.desktop/share/classes/sun/awt/datatransfer/DesktopDatatransferServiceImpl.java index e96ca888ee3..4234dbad59a 100644 --- a/src/java.desktop/share/classes/sun/awt/datatransfer/DesktopDatatransferServiceImpl.java +++ b/src/java.desktop/share/classes/sun/awt/datatransfer/DesktopDatatransferServiceImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ package sun.awt.datatransfer; -import sun.awt.AppContext; import sun.datatransfer.DesktopDatatransferService; import java.awt.EventQueue; @@ -43,8 +42,6 @@ import java.util.function.Supplier; */ public class DesktopDatatransferServiceImpl implements DesktopDatatransferService { - private static final Object FLAVOR_MAP_KEY = new Object(); - @Override public void invokeOnEventThread(Runnable r) { EventQueue.invokeLater(r); @@ -59,13 +56,11 @@ public class DesktopDatatransferServiceImpl implements DesktopDatatransferServic return null; } + private FlavorMap fm; @Override - public FlavorMap getFlavorMap(Supplier supplier) { - AppContext context = AppContext.getAppContext(); - FlavorMap fm = (FlavorMap) context.get(FLAVOR_MAP_KEY); + public synchronized FlavorMap getFlavorMap(Supplier supplier) { if (fm == null) { fm = supplier.get(); - context.put(FLAVOR_MAP_KEY, fm); } return fm; } diff --git a/src/java.desktop/share/classes/sun/awt/datatransfer/SunClipboard.java b/src/java.desktop/share/classes/sun/awt/datatransfer/SunClipboard.java index bc8071a798b..4ccee481a9e 100644 --- a/src/java.desktop/share/classes/sun/awt/datatransfer/SunClipboard.java +++ b/src/java.desktop/share/classes/sun/awt/datatransfer/SunClipboard.java @@ -226,9 +226,6 @@ public abstract class SunClipboard extends Clipboard { * argument is not {@code null} and is not equal to the current * contents context. * - * @param disposedContext the AppContext that is disposed or - * {@code null} if the ownership is lost because another - * application acquired ownership. 
*/ protected void lostOwnershipLater() { SunToolkit.postEvent(new PeerEvent(this, () -> lostOwnershipNow(), diff --git a/src/java.desktop/share/classes/sun/awt/dnd/SunDragSourceContextPeer.java b/src/java.desktop/share/classes/sun/awt/dnd/SunDragSourceContextPeer.java index 258cebd6616..4aad70961bc 100644 --- a/src/java.desktop/share/classes/sun/awt/dnd/SunDragSourceContextPeer.java +++ b/src/java.desktop/share/classes/sun/awt/dnd/SunDragSourceContextPeer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -264,8 +264,7 @@ public abstract class SunDragSourceContextPeer implements DragSourceContextPeer modifiers, x, y); EventDispatcher dispatcher = new EventDispatcher(dispatchType, event); - SunToolkit.invokeLaterOnAppContext( - SunToolkit.targetToAppContext(getComponent()), dispatcher); + SunToolkit.invokeLater(dispatcher); startSecondaryEventLoop(); } @@ -310,8 +309,7 @@ public abstract class SunDragSourceContextPeer implements DragSourceContextPeer EventDispatcher dispatcher = new EventDispatcher(DISPATCH_EXIT, event); - SunToolkit.invokeLaterOnAppContext( - SunToolkit.targetToAppContext(getComponent()), dispatcher); + SunToolkit.invokeLater(dispatcher); startSecondaryEventLoop(); } @@ -341,8 +339,7 @@ public abstract class SunDragSourceContextPeer implements DragSourceContextPeer EventDispatcher dispatcher = new EventDispatcher(DISPATCH_FINISH, event); - SunToolkit.invokeLaterOnAppContext( - SunToolkit.targetToAppContext(getComponent()), dispatcher); + SunToolkit.invokeLater(dispatcher); startSecondaryEventLoop(); setNativeContext(0); diff --git a/src/java.desktop/share/classes/sun/awt/dnd/SunDropTargetContextPeer.java b/src/java.desktop/share/classes/sun/awt/dnd/SunDropTargetContextPeer.java 
index 686c3166441..70d1bda9cd0 100644 --- a/src/java.desktop/share/classes/sun/awt/dnd/SunDropTargetContextPeer.java +++ b/src/java.desktop/share/classes/sun/awt/dnd/SunDropTargetContextPeer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,6 @@ import sun.util.logging.PlatformLogger; import java.io.IOException; import java.io.InputStream; -import sun.awt.AppContext; import sun.awt.SunToolkit; import sun.awt.datatransfer.DataTransferer; import sun.awt.datatransfer.ToolkitThreadBlockedHandler; @@ -558,7 +557,6 @@ public abstract class SunDropTargetContextPeer implements DropTargetContextPeer, final long nativeCtxt, final int eventID, final boolean dispatchType) { - AppContext appContext = SunToolkit.targetToAppContext(component); EventDispatcher dispatcher = new EventDispatcher(this, dropAction, actions, formats, nativeCtxt, @@ -572,7 +570,7 @@ public abstract class SunDropTargetContextPeer implements DropTargetContextPeer, } // schedule callback - SunToolkit.postEvent(appContext, event); + SunToolkit.postEvent(event); eventPosted(event); diff --git a/src/java.desktop/share/classes/sun/awt/im/ExecutableInputMethodManager.java b/src/java.desktop/share/classes/sun/awt/im/ExecutableInputMethodManager.java index d8246a4947c..f67f560e004 100644 --- a/src/java.desktop/share/classes/sun/awt/im/ExecutableInputMethodManager.java +++ b/src/java.desktop/share/classes/sun/awt/im/ExecutableInputMethodManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,6 @@ import java.awt.PopupMenu; import java.awt.Menu; import java.awt.MenuItem; import java.awt.Toolkit; -import sun.awt.AppContext; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.InvocationEvent; @@ -177,9 +176,8 @@ class ExecutableInputMethodManager extends InputMethodManager lock, true); - AppContext requesterAppContext = SunToolkit.targetToAppContext(requester); synchronized (lock) { - SunToolkit.postEvent(requesterAppContext, event); + SunToolkit.postEvent(event); while (!event.isDispatched()) { lock.wait(); } diff --git a/src/java.desktop/share/classes/sun/awt/util/PerformanceLogger.java b/src/java.desktop/share/classes/sun/awt/util/PerformanceLogger.java index 4a1dde6538d..cf0ff1d9958 100644 --- a/src/java.desktop/share/classes/sun/awt/util/PerformanceLogger.java +++ b/src/java.desktop/share/classes/sun/awt/util/PerformanceLogger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,11 +43,6 @@ import java.io.Writer; * for setting and getting all times and for doing whatever * analysis is interesting; this class is merely a central container * for those timing values. - * Note that, due to the variables in this class being static, - * use of particular time values by multiple AppContexts will cause - * confusing results. For example, if two contexts run - * simultaneously, the initTime for those will collide - * and the results may be undefined. *

    * To automatically track startup performance in an app * use the command-line parameter sun.perflog as follows:
    diff --git a/src/java.desktop/share/classes/sun/font/ExtendedTextSourceLabel.java b/src/java.desktop/share/classes/sun/font/ExtendedTextSourceLabel.java index f3569941321..78ae70629b6 100644 --- a/src/java.desktop/share/classes/sun/font/ExtendedTextSourceLabel.java +++ b/src/java.desktop/share/classes/sun/font/ExtendedTextSourceLabel.java @@ -57,15 +57,16 @@ import java.util.Map; * Align bounds is a rect that defines how to align this to margins. * it generally allows some overhang that logical bounds would prevent. */ -class ExtendedTextSourceLabel implements TextLineComponent, Decoration.Label { +final class ExtendedTextSourceLabel implements TextLineComponent, Decoration.Label { private final TextSource source; private final Decoration decorator; // caches - private Font font; - private AffineTransform baseTX; - private CoreMetrics cm; + private final Font font; + private final AffineTransform baseTX; + private final CoreMetrics cm; + private final float advTracking; private Rectangle2D lb; private Rectangle2D ab; @@ -74,34 +75,18 @@ class ExtendedTextSourceLabel implements TextLineComponent, Decoration.Label { private StandardGlyphVector gv; private float[] charinfo; - private float advTracking; - /** * Create from a TextSource. */ public ExtendedTextSourceLabel(TextSource source, Decoration decorator) { this.source = source; this.decorator = decorator; - finishInit(); - } - - /** - * Create from a TextSource, optionally using cached data from oldLabel starting at the offset. - * If present oldLabel must have been created from a run of text that includes the text used in - * the new label. Start in source corresponds to logical character offset in oldLabel. - */ - public ExtendedTextSourceLabel(TextSource source, ExtendedTextSourceLabel oldLabel, int offset) { - // currently no optimization. 
- this.source = source; - this.decorator = oldLabel.decorator; - finishInit(); - } - - private void finishInit() { - font = source.getFont(); + Font font = source.getFont(); Map atts = font.getAttributes(); - baseTX = AttributeValues.getBaselineTransform(atts); + AffineTransform baseTX = AttributeValues.getBaselineTransform(atts); + + CoreMetrics cm; if (baseTX == null){ cm = source.getCoreMetrics(); } else { @@ -110,13 +95,15 @@ class ExtendedTextSourceLabel implements TextLineComponent, Decoration.Label { charTX = new AffineTransform(); } font = font.deriveFont(charTX); - LineMetrics lm = font.getLineMetrics(source.getChars(), source.getStart(), source.getStart() + source.getLength(), source.getFRC()); cm = CoreMetrics.get(lm); } - advTracking = font.getSize() * AttributeValues.getTracking(atts); + this.font = font; + this.baseTX = baseTX; + this.cm = cm; + this.advTracking = font.getSize() * AttributeValues.getTracking(atts); } /** diff --git a/src/java.desktop/share/classes/sun/font/GlyphLayout.java b/src/java.desktop/share/classes/sun/font/GlyphLayout.java index 5bff127f143..851201fe347 100644 --- a/src/java.desktop/share/classes/sun/font/GlyphLayout.java +++ b/src/java.desktop/share/classes/sun/font/GlyphLayout.java @@ -125,10 +125,10 @@ public final class GlyphLayout { } private static final class SDCache { - public AffineTransform dtx; - public AffineTransform gtx; - public Point2D.Float delta; - public FontStrikeDesc sd; + private final AffineTransform dtx; + private final AffineTransform gtx; + private final Point2D.Float delta; + private final FontStrikeDesc sd; private SDCache(Font font, FontRenderContext frc) { // !!! add getVectorTransform and hasVectorTransform to frc? 
then diff --git a/src/java.desktop/share/classes/sun/font/HBShaper.java b/src/java.desktop/share/classes/sun/font/HBShaper.java index 7d3f58fb88f..dea8a9e22dd 100644 --- a/src/java.desktop/share/classes/sun/font/HBShaper.java +++ b/src/java.desktop/share/classes/sun/font/HBShaper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -138,6 +138,7 @@ public class HBShaper { private static final MemorySegment get_h_advance_stub; private static final MemorySegment get_v_advance_stub; private static final MemorySegment get_contour_pt_stub; + private static final MemorySegment get_table_data_fn_stub; private static final MemorySegment store_layout_results_stub; @@ -209,45 +210,36 @@ public class HBShaper { jdk_hb_shape_handle = tmp4; Arena garena = Arena.global(); // creating stubs that exist until VM exit. 
- FunctionDescriptor get_var_glyph_fd = getFunctionDescriptor(JAVA_INT, // return type - ADDRESS, ADDRESS, JAVA_INT, JAVA_INT, ADDRESS, ADDRESS); // arg types - MethodHandle get_var_glyph_mh = - getMethodHandle("get_variation_glyph", get_var_glyph_fd); - @SuppressWarnings("restricted") - MemorySegment tmp5 = LINKER.upcallStub(get_var_glyph_mh, get_var_glyph_fd, garena); - get_var_glyph_stub = tmp5; - FunctionDescriptor get_nominal_glyph_fd = getFunctionDescriptor(JAVA_INT, // return type - ADDRESS, ADDRESS, JAVA_INT, ADDRESS, ADDRESS); // arg types - MethodHandle get_nominal_glyph_mh = - getMethodHandle("get_nominal_glyph", get_nominal_glyph_fd); - @SuppressWarnings("restricted") - MemorySegment tmp6 = LINKER.upcallStub(get_nominal_glyph_mh, get_nominal_glyph_fd, garena); - get_nominal_glyph_stub = tmp6; + get_table_data_fn_stub = getUpcallStub(garena, + "getFontTableData", // method name + JAVA_INT, // return type + JAVA_INT, ADDRESS); // arg types - FunctionDescriptor get_h_adv_fd = getFunctionDescriptor(JAVA_INT, // return type - ADDRESS, ADDRESS, JAVA_INT, ADDRESS); // arg types - MethodHandle get_h_adv_mh = - getMethodHandle("get_glyph_h_advance", get_h_adv_fd); - @SuppressWarnings("restricted") - MemorySegment tmp7 = LINKER.upcallStub(get_h_adv_mh, get_h_adv_fd, garena); - get_h_advance_stub = tmp7; + get_var_glyph_stub = getUpcallStub(garena, + "get_variation_glyph", // method name + JAVA_INT, // return type + ADDRESS, ADDRESS, JAVA_INT, JAVA_INT, ADDRESS, ADDRESS); // arg types - FunctionDescriptor get_v_adv_fd = getFunctionDescriptor(JAVA_INT, // return type - ADDRESS, ADDRESS, JAVA_INT, ADDRESS); // arg types - MethodHandle get_v_adv_mh = - getMethodHandle("get_glyph_v_advance", get_v_adv_fd); - @SuppressWarnings("restricted") - MemorySegment tmp8 = LINKER.upcallStub(get_v_adv_mh, get_v_adv_fd, garena); - get_v_advance_stub = tmp8; + get_nominal_glyph_stub = getUpcallStub(garena, + "get_nominal_glyph", // method name + JAVA_INT, // return type + ADDRESS, 
ADDRESS, JAVA_INT, ADDRESS, ADDRESS); // arg types - FunctionDescriptor get_contour_pt_fd = getFunctionDescriptor(JAVA_INT, // return type - ADDRESS, ADDRESS, JAVA_INT, JAVA_INT, ADDRESS, ADDRESS, ADDRESS); // arg types - MethodHandle get_contour_pt_mh = - getMethodHandle("get_glyph_contour_point", get_contour_pt_fd); - @SuppressWarnings("restricted") - MemorySegment tmp9 = LINKER.upcallStub(get_contour_pt_mh, get_contour_pt_fd, garena); - get_contour_pt_stub = tmp9; + get_h_advance_stub = getUpcallStub(garena, + "get_glyph_h_advance", // method name + JAVA_INT, // return type + ADDRESS, ADDRESS, JAVA_INT, ADDRESS); // arg types + + get_v_advance_stub = getUpcallStub(garena, + "get_glyph_v_advance", // method name + JAVA_INT, // return type + ADDRESS, ADDRESS, JAVA_INT, ADDRESS); // arg types + + get_contour_pt_stub = getUpcallStub(garena, + "get_glyph_contour_point", // method name + JAVA_INT, // return type + ADDRESS, ADDRESS, JAVA_INT, JAVA_INT, ADDRESS, ADDRESS, ADDRESS); // arg types /* Having now created the font upcall stubs, we can call down to create * the native harfbuzz object holding these. @@ -303,15 +295,9 @@ public class HBShaper { clusterHandle = getVarHandle(GlyphInfoLayout, "cluster"); } - - /* - * This is expensive but it is done just once per font. - * The unbound stub could be cached but the savings would - * be very low in the only case it is used. - */ @SuppressWarnings("restricted") - private static MemorySegment getBoundUpcallStub - (Arena arena, Class clazz, Object bindArg, String mName, + private static MemorySegment getUpcallStub + (Arena arena, String mName, MemoryLayout retType, MemoryLayout... 
argTypes) { try { @@ -320,10 +306,8 @@ public class HBShaper { FunctionDescriptor.ofVoid(argTypes) : FunctionDescriptor.of(retType, argTypes); MethodType mType = nativeDescriptor.toMethodType(); - mType = mType.insertParameterTypes(0, clazz); MethodHandle mh = MH_LOOKUP.findStatic(HBShaper.class, mName, mType); - MethodHandle bound_handle = mh.bindTo(bindArg); - return LINKER.upcallStub(bound_handle, nativeDescriptor, arena); + return LINKER.upcallStub(mh, nativeDescriptor, arena); } catch (IllegalAccessException | NoSuchMethodException e) { return null; } @@ -480,15 +464,16 @@ public class HBShaper { }); } - private static int getFontTableData(Font2D font2D, - int tag, - MemorySegment data_ptr_out) { + private static int getFontTableData(int tag, MemorySegment data_ptr_out) { /* * On return, the data_out_ptr will point to memory allocated by native malloc, * so it will be freed by the caller using native free - when it is * done with it. */ + + Font2D font2D = scopedVars.get().font(); + @SuppressWarnings("restricted") MemorySegment data_ptr = data_ptr_out.reinterpret(ADDRESS.byteSize()); if (tag == 0) { @@ -539,10 +524,6 @@ public class HBShaper { private static class FaceRef implements DisposerRecord { private Font2D font2D; private MemorySegment face; - // get_table_data_fn uses an Arena managed by GC, - // so we need to keep a reference to it here until - // this FaceRef is collected. 
- private MemorySegment get_table_data_fn; private FaceRef(Font2D font) { this.font2D = font; @@ -561,16 +542,7 @@ public class HBShaper { private void createFace() { try { - get_table_data_fn = getBoundUpcallStub(Arena.ofAuto(), - Font2D.class, - font2D, // bind arg - "getFontTableData", // method name - JAVA_INT, // return type - JAVA_INT, ADDRESS); // arg types - if (get_table_data_fn == null) { - return; - } - face = (MemorySegment)create_face_handle.invokeExact(get_table_data_fn); + face = (MemorySegment)create_face_handle.invokeExact(get_table_data_fn_stub); } catch (Throwable t) { } } diff --git a/src/java.desktop/share/classes/sun/font/ScriptRunData.java b/src/java.desktop/share/classes/sun/font/ScriptRunData.java index 1f1f6b44d59..4e13fb5bbd0 100644 --- a/src/java.desktop/share/classes/sun/font/ScriptRunData.java +++ b/src/java.desktop/share/classes/sun/font/ScriptRunData.java @@ -38,11 +38,12 @@ public final class ScriptRunData { private static final int CHAR_START = 0; private static final int CHAR_LIMIT = 0x110000; - private static int cache = 0; + private static volatile int cache = 0; public static int getScript(int cp) { + int lcache = cache; // optimize for runs of characters in the same script - if (cp >= data[cache] && cp < data[cache+2]) { - return data[cache+1]; + if (cp >= data[lcache] && cp < data[lcache+2]) { + return data[lcache+1]; } if ((cp >= CHAR_START) && (cp < CHAR_LIMIT)) { int probe = dataPower; diff --git a/src/java.desktop/share/classes/sun/font/SunFontManager.java b/src/java.desktop/share/classes/sun/font/SunFontManager.java index 85a948ef594..323f0d056e1 100644 --- a/src/java.desktop/share/classes/sun/font/SunFontManager.java +++ b/src/java.desktop/share/classes/sun/font/SunFontManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2480,30 +2480,12 @@ public abstract class SunFontManager implements FontSupport, FontManagerForSGE { * SunGraphicsEnvironment it performs the same initialization as is * performed normally. There may be some duplication of effort, but * that code is already written to be able to perform properly if called - * to duplicate work. The main difference is that if we detect we are - * in an AppContext environment these new fonts - * are not placed in the "default" maps but into an AppContext instance. - * The font lookup mechanism in java.awt.Font.getFont2D() is also updated - * so that look-up for composite fonts will in that case always - * do a lookup rather than returning a cached result. - * This is inefficient but necessary else singleton java.awt.Font - * instances would not retrieve the correct Font2D for the appcontext. - * sun.font.FontManager.findFont2D is also updated to that it uses - * a name map cache specific to that appcontext. - * - * Getting an AppContext is expensive, so there is a global variable - * that records whether these methods have ever been called and can - * avoid the expense for almost all applications. Once the correct - * CompositeFont is associated with the Font, everything should work - * through existing mechanisms. - * A special case is that GraphicsEnvironment.getAllFonts() must - * return an AppContext specific list. + * to duplicate work. * * Calling the methods below is "heavyweight" but it is expected that * these methods will be called very rarely. * - * If _usingAlternateComposites is true, we are not in an "AppContext" - * environment and the (single) application has selected + * If _usingAlternateComposites is true, the application has selected * an alternate composite font behaviour. 
* * - Printing: The implementation delegates logical fonts to an AWT diff --git a/src/java.desktop/share/classes/sun/java2d/SunGraphics2D.java b/src/java.desktop/share/classes/sun/java2d/SunGraphics2D.java index 9815d657eee..d66cd3fe3d5 100644 --- a/src/java.desktop/share/classes/sun/java2d/SunGraphics2D.java +++ b/src/java.desktop/share/classes/sun/java2d/SunGraphics2D.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -250,11 +250,6 @@ public final class SunGraphics2D private FontInfo glyphVectorFontInfo; private FontRenderContext glyphVectorFRC; - private static final int slowTextTransformMask = - AffineTransform.TYPE_GENERAL_TRANSFORM - | AffineTransform.TYPE_MASK_ROTATION - | AffineTransform.TYPE_FLIP; - static { if (PerformanceLogger.loggingEnabled()) { PerformanceLogger.setTime("SunGraphics2D static initialization"); @@ -453,13 +448,13 @@ public final class SunGraphics2D * or whether that shape must be "kept" unmodified. 
*/ Shape intersectShapes(Shape s1, Shape s2, boolean keep1, boolean keep2) { - if (s1 instanceof Rectangle && s2 instanceof Rectangle) { - return ((Rectangle) s1).intersection((Rectangle) s2); + if (s1 instanceof Rectangle r1 && s2 instanceof Rectangle r2) { + return r1.intersection(r2); } - if (s1 instanceof Rectangle2D) { - return intersectRectShape((Rectangle2D) s1, s2, keep1, keep2); - } else if (s2 instanceof Rectangle2D) { - return intersectRectShape((Rectangle2D) s2, s1, keep2, keep1); + if (s1 instanceof Rectangle2D r1) { + return intersectRectShape(r1, s2, keep1, keep2); + } else if (s2 instanceof Rectangle2D r2) { + return intersectRectShape(r2, s1, keep2, keep1); } return intersectByArea(s1, s2, keep1, keep2); } @@ -473,8 +468,7 @@ public final class SunGraphics2D */ Shape intersectRectShape(Rectangle2D r, Shape s, boolean keep1, boolean keep2) { - if (s instanceof Rectangle2D) { - Rectangle2D r2 = (Rectangle2D) s; + if (s instanceof Rectangle2D r2) { Rectangle2D outrect; if (!keep1) { outrect = r; @@ -596,15 +590,15 @@ public final class SunGraphics2D } float ptSize = font.getSize2D(); - int txFontType; AffineTransform devAt, textAt=null; if (font.isTransformed()) { textAt = font.getTransform(); textAt.scale(ptSize, ptSize); - txFontType = textAt.getType(); info.originX = (float)textAt.getTranslateX(); info.originY = (float)textAt.getTranslateY(); - textAt.translate(-info.originX, -info.originY); + textAt.setTransform(textAt.getScaleX(), textAt.getShearY(), + textAt.getShearX(), textAt.getScaleY(), + 0, 0); if (transformState >= TRANSFORM_TRANSLATESCALE) { transform.getMatrix(info.devTx = new double[4]); devAt = new AffineTransform(info.devTx); @@ -621,7 +615,6 @@ public final class SunGraphics2D } info.pixelHeight = (int)(Math.abs(scaley)+0.5); } else { - txFontType = AffineTransform.TYPE_IDENTITY; info.originX = info.originY = 0; if (transformState >= TRANSFORM_TRANSLATESCALE) { transform.getMatrix(info.devTx = new double[4]); @@ -783,18 +776,6 @@ 
public final class SunGraphics2D return info; } - public static boolean isRotated(double [] mtx) { - if ((mtx[0] == mtx[3]) && - (mtx[1] == 0.0) && - (mtx[2] == 0.0) && - (mtx[0] > 0.0)) - { - return false; - } - - return true; - } - public void setFont(Font font) { /* replacing the reference equality test font != this.font with * !font.equals(this.font) did not yield any measurable difference @@ -944,8 +925,7 @@ public final class SunGraphics2D } int newCompState; CompositeType newCompType; - if (comp instanceof AlphaComposite) { - AlphaComposite alphacomp = (AlphaComposite) comp; + if (comp instanceof AlphaComposite alphacomp) { newCompType = CompositeType.forAlphaComposite(alphacomp); if (newCompType == CompositeType.SrcOverNoEa) { if (paintState == PAINT_OPAQUECOLOR || @@ -1000,8 +980,8 @@ public final class SunGraphics2D * @see TexturePaint */ public void setPaint(Paint paint) { - if (paint instanceof Color) { - setColor((Color) paint); + if (paint instanceof Color c) { + setColor(c); return; } if (paint == null || this.paint == paint) { @@ -1162,8 +1142,8 @@ public final class SunGraphics2D } int saveStrokeState = strokeState; stroke = s; - if (s instanceof BasicStroke) { - validateBasicStroke((BasicStroke) s); + if (s instanceof BasicStroke bs) { + validateBasicStroke(bs); } else { strokeState = STROKE_CUSTOM; } @@ -1193,11 +1173,10 @@ public final class SunGraphics2D throw new IllegalArgumentException (hintValue+" is not compatible with "+hintKey); } - if (hintKey instanceof SunHints.Key) { + if (hintKey instanceof SunHints.Key sunKey) { boolean stateChanged; boolean textStateChanged = false; boolean recognized = true; - SunHints.Key sunKey = (SunHints.Key) hintKey; int newHint; if (sunKey == SunHints.KEY_TEXT_ANTIALIAS_LCD_CONTRAST) { newHint = ((Integer)hintValue).intValue(); @@ -1297,7 +1276,6 @@ public final class SunGraphics2D hints.put(hintKey, hintValue); } - /** * Returns the preferences for the rendering algorithms. 
* @param hintKey The category of hint to be set. The strings @@ -1310,10 +1288,10 @@ public final class SunGraphics2D if (hints != null) { return hints.get(hintKey); } - if (!(hintKey instanceof SunHints.Key)) { + if (!(hintKey instanceof SunHints.Key shk)) { return null; } - int keyindex = ((SunHints.Key)hintKey).getIndex(); + int keyindex = shk.getIndex(); switch (keyindex) { case SunHints.INTKEY_RENDERING: return SunHints.Value.get(SunHints.INTKEY_RENDERING, @@ -1822,8 +1800,8 @@ public final class SunGraphics2D public Rectangle getClipBounds(Rectangle r) { if (clipState != CLIP_DEVICE) { if (transformState <= TRANSFORM_INT_TRANSLATE) { - if (usrClip instanceof Rectangle) { - r.setBounds((Rectangle) usrClip); + if (usrClip instanceof Rectangle usrClipRect) { + r.setBounds(usrClipRect); } else { r.setFrame(usrClip.getBounds2D()); } @@ -1970,8 +1948,7 @@ public final class SunGraphics2D r.translate(tx, ty); return r; } - if (s instanceof Rectangle2D) { - Rectangle2D rect = (Rectangle2D) s; + if (s instanceof Rectangle2D rect) { return new Rectangle2D.Double(rect.getX() + tx, rect.getY() + ty, rect.getWidth(), @@ -1991,10 +1968,9 @@ public final class SunGraphics2D return null; } - if (clip instanceof Rectangle2D && + if (clip instanceof Rectangle2D rect && (tx.getType() & NON_RECTILINEAR_TRANSFORM_MASK) == 0) { - Rectangle2D rect = (Rectangle2D) clip; double[] matrix = new double[4]; matrix[0] = rect.getX(); matrix[1] = rect.getY(); @@ -2180,65 +2156,6 @@ public final class SunGraphics2D } } - /* - public void XcopyArea(int x, int y, int w, int h, int dx, int dy) { - Rectangle rect = new Rectangle(x, y, w, h); - rect = transformBounds(rect, transform); - Point2D point = new Point2D.Float(dx, dy); - Point2D root = new Point2D.Float(0, 0); - point = transform.transform(point, point); - root = transform.transform(root, root); - int fdx = (int)(point.getX()-root.getX()); - int fdy = (int)(point.getY()-root.getY()); - - Rectangle r = 
getCompBounds().intersection(rect.getBounds()); - - if (r.isEmpty()) { - return; - } - - // Begin Rasterizer for Clip Shape - boolean skipClip = true; - byte[] clipAlpha = null; - - if (clipState == CLIP_SHAPE) { - - int box[] = new int[4]; - - clipRegion.getBounds(box); - Rectangle devR = new Rectangle(box[0], box[1], - box[2] - box[0], - box[3] - box[1]); - if (!devR.isEmpty()) { - OutputManager mgr = getOutputManager(); - RegionIterator ri = clipRegion.getIterator(); - while (ri.nextYRange(box)) { - int spany = box[1]; - int spanh = box[3] - spany; - while (ri.nextXBand(box)) { - int spanx = box[0]; - int spanw = box[2] - spanx; - mgr.copyArea(this, null, - spanw, 0, - spanx, spany, - spanw, spanh, - fdx, fdy, - null); - } - } - } - return; - } - // End Rasterizer for Clip Shape - - getOutputManager().copyArea(this, null, - r.width, 0, - r.x, r.y, r.width, - r.height, fdx, fdy, - null); - } - */ - public void drawLine(int x1, int y1, int x2, int y2) { try { drawpipe.drawLine(this, x1, y1, x2, y2); @@ -2465,8 +2382,8 @@ public final class SunGraphics2D if (paintState <= PAINT_ALPHACOLOR) { validateColor(); } - if (composite instanceof XORComposite) { - Color c = ((XORComposite) composite).getXorColor(); + if (composite instanceof XORComposite xorComp) { + Color c = xorComp.getXorColor(); setComposite(new XORComposite(c, surfaceData)); } validatePipe(); @@ -2668,8 +2585,7 @@ public final class SunGraphics2D } // BufferedImage case: use a simple drawImage call - if (img instanceof BufferedImage) { - BufferedImage bufImg = (BufferedImage)img; + if (img instanceof BufferedImage bufImg) { drawImage(bufImg,xform,null); return; } @@ -2905,21 +2821,6 @@ public final class SunGraphics2D drawRenderedImage(rendering,reverseTransform); } - - - /* - * Transform the bounding box of the BufferedImage - */ - protected Rectangle transformBounds(Rectangle rect, - AffineTransform tx) { - if (tx.isIdentity()) { - return rect; - } - - Shape s = transformShape(tx, rect); - return 
s.getBounds(); - } - // text rendering methods public void drawString(String str, int x, int y) { if (str == null) { @@ -3130,13 +3031,12 @@ public final class SunGraphics2D invalidateTransform(); } return result; - } else if (img instanceof MultiResolutionImage) { + } else if (img instanceof MultiResolutionImage mrImage) { // get scaled destination image size int width = img.getWidth(observer); int height = img.getHeight(observer); - MultiResolutionImage mrImage = (MultiResolutionImage) img; Image resolutionVariant = getResolutionVariant(mrImage, width, height, dx1, dy1, dx2, dy2, sx1, sy1, sx2, sy2, @@ -3311,8 +3211,7 @@ public final class SunGraphics2D Image resolutionVariant = img.getResolutionVariant(destImageWidth, destImageHeight); - if (resolutionVariant instanceof ToolkitImage - && ((ToolkitImage) resolutionVariant).hasError()) { + if (resolutionVariant instanceof ToolkitImage tki && tki.hasError()) { return null; } diff --git a/src/java.desktop/share/classes/sun/java2d/loops/FontInfo.java b/src/java.desktop/share/classes/sun/java2d/loops/FontInfo.java index 09aa05b785e..10e10a4adde 100644 --- a/src/java.desktop/share/classes/sun/java2d/loops/FontInfo.java +++ b/src/java.desktop/share/classes/sun/java2d/loops/FontInfo.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ package sun.java2d.loops; import java.awt.Font; +import java.util.Arrays; import sun.font.Font2D; import sun.font.FontStrike; @@ -41,7 +42,7 @@ import sun.font.FontStrike; * time. I am reluctant to add the overhead of that machinery here without * a proven benefit. 
*/ -public class FontInfo implements Cloneable { +public final class FontInfo implements Cloneable { public Font font; public Font2D font2D; public FontStrike fontStrike; @@ -56,15 +57,7 @@ public class FontInfo implements Cloneable { /* lcdSubPixPos is used if FM is ON for HRGB/HBGR LCD text mode */ public boolean lcdSubPixPos; - public String mtx(double[] matrix) { - return ("["+ - matrix[0]+", "+ - matrix[1]+", "+ - matrix[2]+", "+ - matrix[3]+ - "]"); - } - + @Override public Object clone() { try { return super.clone(); @@ -73,15 +66,16 @@ public class FontInfo implements Cloneable { } } + @Override public String toString() { return ("FontInfo["+ "font="+font+", "+ - "devTx="+mtx(devTx)+", "+ - "glyphTx="+mtx(glyphTx)+", "+ + "devTx="+Arrays.toString(devTx)+", "+ + "glyphTx="+Arrays.toString(glyphTx)+", "+ "pixelHeight="+pixelHeight+", "+ "origin=("+originX+","+originY+"), "+ "aaHint="+aaHint+", "+ - "lcdRGBOrder="+(lcdRGBOrder ? "RGB" : "BGR")+ + "lcdRGBOrder="+(lcdRGBOrder ? "RGB" : "BGR")+", "+ "lcdSubPixPos="+lcdSubPixPos+ "]"); } diff --git a/src/java.desktop/share/classes/sun/print/PrintJobDelegate.java b/src/java.desktop/share/classes/sun/print/PrintJobDelegate.java index 5a88d4b9d45..bca3301b5c9 100644 --- a/src/java.desktop/share/classes/sun/print/PrintJobDelegate.java +++ b/src/java.desktop/share/classes/sun/print/PrintJobDelegate.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -526,8 +526,10 @@ public class PrintJobDelegate implements Printable, Runnable { } PageRanges range = (PageRanges)attributes.get(PageRanges.class); - int[][] members = range.getMembers(); - jobAttributes.setPageRanges(members); + if (range != null) { + int[][] members = range.getMembers(); + jobAttributes.setPageRanges(members); + } SheetCollate collation = (SheetCollate)attributes.get(SheetCollate.class); diff --git a/src/java.desktop/share/classes/sun/print/RasterPrinterJob.java b/src/java.desktop/share/classes/sun/print/RasterPrinterJob.java index 32728efde6c..b28723f94a6 100644 --- a/src/java.desktop/share/classes/sun/print/RasterPrinterJob.java +++ b/src/java.desktop/share/classes/sun/print/RasterPrinterJob.java @@ -33,24 +33,23 @@ import java.awt.HeadlessException; import java.awt.KeyboardFocusManager; import java.awt.Rectangle; import java.awt.Shape; +import java.awt.Window; import java.awt.geom.AffineTransform; import java.awt.geom.Point2D; import java.awt.geom.Rectangle2D; import java.awt.image.BufferedImage; import java.awt.print.Book; -import java.awt.print.Pageable; import java.awt.print.PageFormat; +import java.awt.print.Pageable; import java.awt.print.Paper; import java.awt.print.Printable; import java.awt.print.PrinterAbortException; import java.awt.print.PrinterException; import java.awt.print.PrinterJob; -import java.awt.Window; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Locale; -import sun.awt.image.ByteInterleavedRaster; import javax.print.Doc; import javax.print.DocFlavor; @@ -69,8 +68,8 @@ import javax.print.attribute.ResolutionSyntax; import javax.print.attribute.Size2DSyntax; import javax.print.attribute.standard.Copies; import javax.print.attribute.standard.Destination; -import javax.print.attribute.standard.DialogTypeSelection; import javax.print.attribute.standard.DialogOwner; +import 
javax.print.attribute.standard.DialogTypeSelection; import javax.print.attribute.standard.Fidelity; import javax.print.attribute.standard.JobName; import javax.print.attribute.standard.JobSheets; @@ -81,15 +80,17 @@ import javax.print.attribute.standard.MediaSizeName; import javax.print.attribute.standard.OrientationRequested; import javax.print.attribute.standard.OutputBin; import javax.print.attribute.standard.PageRanges; +import javax.print.attribute.standard.PrinterIsAcceptingJobs; import javax.print.attribute.standard.PrinterResolution; import javax.print.attribute.standard.PrinterState; import javax.print.attribute.standard.PrinterStateReason; import javax.print.attribute.standard.PrinterStateReasons; -import javax.print.attribute.standard.PrinterIsAcceptingJobs; import javax.print.attribute.standard.RequestingUserName; import javax.print.attribute.standard.SheetCollate; import javax.print.attribute.standard.Sides; +import sun.awt.image.ByteInterleavedRaster; + import static sun.font.FontUtilities.isIgnorableWhitespace; /** @@ -1613,8 +1614,7 @@ public abstract class RasterPrinterJob extends PrinterJob { } catch (PrinterException pe) { throw pe; } catch (Throwable printError) { - throw (PrinterException) - new PrinterException().initCause(printError.getCause()); + throw (PrinterException) new PrinterException().initCause(printError); } finally { // reset previousPaper in case this job is invoked again. 
previousPaper = null; diff --git a/src/java.desktop/share/classes/sun/swing/SwingUtilities2.java b/src/java.desktop/share/classes/sun/swing/SwingUtilities2.java index d552bc5c9f2..8c6d6ab9850 100644 --- a/src/java.desktop/share/classes/sun/swing/SwingUtilities2.java +++ b/src/java.desktop/share/classes/sun/swing/SwingUtilities2.java @@ -44,6 +44,7 @@ import java.awt.Rectangle; import java.awt.RenderingHints; import java.awt.Shape; import java.awt.Toolkit; +import java.awt.event.FocusEvent; import java.awt.event.InputEvent; import java.awt.event.KeyEvent; import java.awt.event.MouseEvent; @@ -795,7 +796,7 @@ public class SwingUtilities2 { */ public static void adjustFocus(JComponent c) { if (!c.hasFocus() && c.isRequestFocusEnabled()) { - c.requestFocus(); + c.requestFocus(FocusEvent.Cause.MOUSE_EVENT); } } @@ -1434,15 +1435,6 @@ public class SwingUtilities2 { } } - /** - * checks if the system clipboard can be accessed. - * This is true in a headful environment, false in a headless one - * - */ - public static boolean canAccessSystemClipboard() { - return !GraphicsEnvironment.isHeadless(); - } - public static String displayPropertiesToCSS(Font font, Color fg) { StringBuilder rule = new StringBuilder("body {"); if (font != null) { @@ -1645,24 +1637,24 @@ public class SwingUtilities2 { if (container.isFocusCycleRoot()) { FocusTraversalPolicy policy = container.getFocusTraversalPolicy(); Component comp = policy.getDefaultComponent(container); - if (comp!=null) { - comp.requestFocus(); + if (comp != null) { + comp.requestFocus(FocusEvent.Cause.TRAVERSAL); return comp; } } Container rootAncestor = container.getFocusCycleRootAncestor(); - if (rootAncestor!=null) { + if (rootAncestor != null) { FocusTraversalPolicy policy = rootAncestor.getFocusTraversalPolicy(); Component comp = policy.getComponentAfter(rootAncestor, container); - if (comp!=null && SwingUtilities.isDescendingFrom(comp, container)) { - comp.requestFocus(); + if (comp != null && 
SwingUtilities.isDescendingFrom(comp, container)) { + comp.requestFocus(FocusEvent.Cause.TRAVERSAL); return comp; } } } if (component.isFocusable()) { - component.requestFocus(); + component.requestFocus(FocusEvent.Cause.TRAVERSAL); return component; } return null; diff --git a/src/java.desktop/share/legal/freetype.md b/src/java.desktop/share/legal/freetype.md index 5df525e2f67..7259c27183f 100644 --- a/src/java.desktop/share/legal/freetype.md +++ b/src/java.desktop/share/legal/freetype.md @@ -1,4 +1,4 @@ -## The FreeType Project: Freetype v2.13.3 +## The FreeType Project: Freetype v2.14.2 ### FreeType Notice @@ -21,25 +21,24 @@ which fits your needs best. ### FreeType License ``` -Copyright (C) 1996-2024 by David Turner, Robert Wilhelm, and Werner Lemberg. -Copyright (C) 2007-2024 by Dereg Clegg and Michael Toftdal. -Copyright (C) 1996-2024 by Just van Rossum, David Turner, Robert Wilhelm, and Werner Lemberg. -Copyright (C) 2022-2024 by David Turner, Robert Wilhelm, Werner Lemberg, George Williams, and -Copyright (C) 2004-2024 by Masatake YAMATO and Redhat K.K. -Copyright (C) 2007-2024 by Derek Clegg and Michael Toftdal. -Copyright (C) 2003-2024 by Masatake YAMATO, Red Hat K.K., -Copyright (C) 1996-2024 by David Turner, Robert Wilhelm, Werner Lemberg, and Dominik Röttsches. -Copyright (C) 2007-2024 by David Turner. -Copyright (C) 2022-2024 by David Turner, Robert Wilhelm, Werner Lemberg, and Moazin Khatti. -Copyright (C) 2007-2024 by Rahul Bhalerao , . -Copyright (C) 2008-2024 by David Turner, Robert Wilhelm, Werner Lemberg, and suzuki toshiya. -Copyright (C) 2013-2024 by Google, Inc. -Copyright (C) 2019-2024 by Nikhil Ramakrishnan, David Turner, Robert Wilhelm, and Werner Lemberg. -Copyright (C) 2009-2024 by Oran Agra and Mickey Gabel. -Copyright (C) 2018-2024 by David Turner, Robert Wilhelm, Dominik Röttsches, and Werner Lemberg. -Copyright (C) 2004-2024 by David Turner, Robert Wilhelm, Werner Lemberg, and George Williams. 
- - +Copyright (C) 1996-2025 by David Turner, Robert Wilhelm, and Werner Lemberg. +Copyright (C) 2007-2025 by Dereg Clegg and Michael Toftdal. +Copyright (C) 1996-2025 by Just van Rossum, David Turner, Robert Wilhelm, and Werner Lemberg. +Copyright (C) 2022-2025 by David Turner, Robert Wilhelm, Werner Lemberg, George Williams, and +Copyright (C) 2004-2025 by Masatake YAMATO and Redhat K.K. +Copyright (C) 2007-2025 by Derek Clegg and Michael Toftdal. +Copyright (C) 2003-2025 by Masatake YAMATO, Red Hat K.K., +Copyright (C) 1996-2025 by David Turner, Robert Wilhelm, Werner Lemberg, and Dominik Röttsches. +Copyright (C) 2007-2025 by David Turner. +Copyright (C) 2022-2025 by David Turner, Robert Wilhelm, Werner Lemberg, and Moazin Khatti. +Copyright (C) 2007-2025 by Rahul Bhalerao , . +Copyright (C) 2025 by Behdad Esfahbod. +Copyright (C) 2008-2025 by David Turner, Robert Wilhelm, Werner Lemberg, and suzuki toshiya. +Copyright (C) 2013-2025 by Google, Inc. +Copyright (C) 2019-2025 by Nikhil Ramakrishnan, David Turner, Robert Wilhelm, and Werner Lemberg. +Copyright (C) 2009-2025 by Oran Agra and Mickey Gabel. +Copyright (C) 2018-2025 by David Turner, Robert Wilhelm, Dominik Röttsches, and Werner Lemberg. +Copyright (C) 2004-2025 by David Turner, Robert Wilhelm, Werner Lemberg, and George Williams. The FreeType Project LICENSE ---------------------------- @@ -207,6 +206,7 @@ Legal Terms https://www.freetype.org + ``` ### GPL v2 diff --git a/src/java.desktop/share/legal/giflib.md b/src/java.desktop/share/legal/giflib.md index 5697dc7ca9a..781023dd334 100644 --- a/src/java.desktop/share/legal/giflib.md +++ b/src/java.desktop/share/legal/giflib.md @@ -1,9 +1,9 @@ -## GIFLIB v5.2.2 +## GIFLIB v6.1.2 ### GIFLIB License ``` -The GIFLIB distribution is Copyright (c) 1997 Eric S. 
Raymond += MIT LICENSE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -25,9 +25,15 @@ THE SOFTWARE. --------------------------------- The below applies to the following file(s): +giflib/dgif_lib.c +giflib/gifalloc.c +giflib/gif_err.c giflib/openbsd-reallocarray.c +Copyright (C) 1989 Gershon Elber Copyright (C) 2008 Otto Moerbeek +Copyright (C) Eric S. Raymond + SPDX-License-Identifier: MIT diff --git a/src/java.desktop/share/legal/libpng.md b/src/java.desktop/share/legal/libpng.md index a2ffcca1974..7783fc7ff03 100644 --- a/src/java.desktop/share/legal/libpng.md +++ b/src/java.desktop/share/legal/libpng.md @@ -1,4 +1,4 @@ -## libpng v1.6.55 +## libpng v1.6.57 ### libpng License

    @@ -168,6 +168,7 @@ Authors, for copyright and licensing purposes.
      * Glenn Randers-Pehrson
      * Greg Roelofs
      * Guy Eric Schalnat
    + * Halil Oktay
      * James Yu
      * John Bowler
      * Joshua Inscoe
    @@ -179,6 +180,7 @@ Authors, for copyright and licensing purposes.
      * Mans Rullgard
      * Matt Sarett
      * Mike Klein
    + * Mohammad Seet
      * Pascal Massimino
      * Paul Schmidt
      * Petr Simecek
    @@ -187,12 +189,14 @@ Authors, for copyright and licensing purposes.
      * Sam Bushell
      * Samuel Williams
      * Simon-Pierre Cadieux
    + * Taegu Ha (하태구)
      * Tim Wegner
      * Tobias Stoeckmann
      * Tom Lane
      * Tom Tanner
      * Vadim Barkov
      * Willem van Schaik
    + * Yuelin Wang (王跃林)
      * Zhijie Liang
      * Apple Inc.
         - Zixu Wang (王子旭)
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftconfig.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftconfig.h
    index 0667493fec6..d66c5df9976 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftconfig.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftconfig.h
    @@ -4,7 +4,7 @@
      *
      *   ANSI-specific configuration file (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftheader.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftheader.h
    index f6ef2618ded..16eab9048fc 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftheader.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftheader.h
    @@ -4,7 +4,7 @@
      *
      *   Build macros of the FreeType 2 library.
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftoption.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftoption.h
    index d29a0a7cefb..a0a1a410b68 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftoption.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftoption.h
    @@ -4,7 +4,7 @@
      *
      *   User-selectable configuration macros (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -158,12 +158,12 @@ FT_BEGIN_HEADER
     
       /**************************************************************************
        *
    -   * If this macro is defined, try to use an inlined assembler version of the
    -   * @FT_MulFix function, which is a 'hotspot' when loading and hinting
    -   * glyphs, and which should be executed as fast as possible.
    +   * If this macro is defined, try to use an inlined 64-bit or assembler
    +   * version of the @FT_MulFix function, which is a 'hotspot' when loading
    +   * and hinting glyphs, and which should be executed as fast as possible.
        *
    -   * Note that if your compiler or CPU is not supported, this will default to
    -   * the standard and portable implementation found in `ftcalc.c`.
    +   * If your compiler is not C99-compliant or CPU assembly is not supported,
    +   * you can disable this option.
        */
     #define FT_CONFIG_OPTION_INLINE_MULFIX
     
    @@ -293,6 +293,31 @@ FT_BEGIN_HEADER
     /* #define FT_CONFIG_OPTION_USE_HARFBUZZ */
     
     
    +  /**************************************************************************
    +   *
    +   * HarfBuzz dynamic support.
    +   *
    +   *   Define this macro if you want the HarfBuzz library to be loaded at
    +   *   runtime instead of being linked to FreeType.
    +   *
    +   *   This option has no effect if `FT_CONFIG_OPTION_USE_HARFBUZZ` is not
    +   *   defined.
    +   *
    +   *   When this option is enabled, FreeType will try to load the HarfBuzz
    +   *   library at runtime, using `dlopen` or `LoadLibrary`, depending on the
    +   *   platform.  On Microsoft platforms, the library name looked up is
    +   *   `libharfbuzz-0.dll`.  On Apple platforms, the library name looked up
    +   *   is `libharfbuzz.0.dylib`.  On all other platforms, the library name
    +   *   looked up is `libharfbuzz.so.0`.  This name can be overridden by
    +   *   defining the macro `FT_LIBHARFBUZZ` at FreeType compilation time.
    +   *
    +   *   If you use a build system like cmake or the `configure` script,
    +   *   options set by those programs have precedence, overwriting the value
    +   *   here with the configured one.
    +   */
    +/* #define FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC */
    +
    +
       /**************************************************************************
        *
        * Brotli support.
    @@ -679,7 +704,7 @@ FT_BEGIN_HEADER
        * defined.
        *
        * [1]
    -   * https://www.microsoft.com/typography/cleartype/truetypecleartype.aspx
    +   * https://learn.microsoft.com/typography/cleartype/truetypecleartype
        */
     #define TT_CONFIG_OPTION_SUBPIXEL_HINTING
     
    @@ -697,7 +722,7 @@ FT_BEGIN_HEADER
        * flags array which can be used to disambiguate, but old fonts will not
        * have them.
        *
    -   *   https://www.microsoft.com/typography/otspec/glyf.htm
    +   *   https://learn.microsoft.com/typography/opentype/spec/glyf
        *   https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6glyf.html
        */
     #undef TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED
    @@ -734,7 +759,13 @@ FT_BEGIN_HEADER
       /**************************************************************************
        *
        * Define `TT_CONFIG_OPTION_BDF` if you want to include support for an
    -   * embedded 'BDF~' table within SFNT-based bitmap formats.
    +   * embedded 'BDF~' table within an SFNT-based `.otb` font file.  This table
    +   * is an extension used by X11 to preserve BDF properties after conversion
    +   * to SFNT containers.  See
    +   *
    +   *   https://fontforge.org/docs/techref/non-standard.html#non-standard-bdf
    +   *
    +   * for more details.
        */
     /* #define TT_CONFIG_OPTION_BDF */
     
    @@ -760,10 +791,10 @@ FT_BEGIN_HEADER
       /**************************************************************************
        *
        * Option `TT_CONFIG_OPTION_GPOS_KERNING` enables a basic GPOS kerning
    -   * implementation (for TrueType fonts only).  With this defined, FreeType
    -   * is able to get kerning pair data from the GPOS 'kern' feature as well as
    -   * legacy 'kern' tables; without this defined, FreeType will only be able
    -   * to use legacy 'kern' tables.
    +   * implementation (for TrueType and OpenType fonts only).  With this
    +   * defined, FreeType is able to get kerning pair data from the GPOS 'kern'
    +   * feature as well as legacy 'kern' tables; without this defined, FreeType
    +   * will only be able to use legacy 'kern' tables.
        *
        * Note that FreeType does not support more advanced GPOS layout features;
        * even the 'kern' feature implemented here doesn't handle more
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftstdlib.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftstdlib.h
    index e17aa7b89d5..f846b4456c1 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/ftstdlib.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/ftstdlib.h
    @@ -5,7 +5,7 @@
      *   ANSI-specific library and header configuration file (specification
      *   only).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/integer-types.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/integer-types.h
    index c27505ffc4b..a0b892ece4b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/integer-types.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/integer-types.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType integer types definitions.
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -17,6 +17,8 @@
     #ifndef FREETYPE_CONFIG_INTEGER_TYPES_H_
     #define FREETYPE_CONFIG_INTEGER_TYPES_H_
     
    +FT_BEGIN_HEADER
    +
       /* There are systems (like the Texas Instruments 'C54x) where a `char`  */
       /* has 16~bits.  ANSI~C says that `sizeof(char)` is always~1.  Since an */
       /* `int` has 16~bits also for this system, `sizeof(int)` gives~1 which  */
    @@ -242,9 +244,34 @@
     #endif /* FT_SIZEOF_LONG == (64 / FT_CHAR_BIT) */
     
     #ifdef FT_INT64
    +
       typedef FT_INT64   FT_Int64;
       typedef FT_UINT64  FT_UInt64;
    -#endif
     
    +#  define FT_INT64_ZERO  0
    +
    +#else  /* !FT_INT64 */
    +
    +  /* we need to emulate 64-bit data types if none are available */
    +
    +  typedef struct  FT_Int64_
    +  {
    +    FT_UInt32  lo;
    +    FT_UInt32  hi;
    +
    +  } FT_Int64;
    +
    +  typedef struct  FT_UInt64_
    +  {
    +    FT_UInt32  lo;
    +    FT_UInt32  hi;
    +
    +  } FT_UInt64;
    +
    +#  define FT_INT64_ZERO  { 0, 0 }
    +
    +#endif /* !FT_INT64 */
    +
    +FT_END_HEADER
     
     #endif  /* FREETYPE_CONFIG_INTEGER_TYPES_H_ */
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/mac-support.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/mac-support.h
    index 07b6f915bd8..bd350851d56 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/mac-support.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/mac-support.h
    @@ -4,7 +4,7 @@
      *
      *   Mac/OS X support configuration header.
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -24,6 +24,7 @@
        *   This is the only necessary change, so it is defined here instead
        *   providing a new configuration file.
        */
    +#ifdef FT_MACINTOSH
     #if defined( __APPLE__ ) || ( defined( __MWERKS__ ) && defined( macintosh ) )
       /* No Carbon frameworks for 64bit 10.4.x.                         */
       /* `AvailabilityMacros.h` is available since Mac OS X 10.2,       */
    @@ -36,6 +37,7 @@
         ( MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4 )
     #undef FT_MACINTOSH
     #endif
    +#endif  /* __APPLE__ ... */
     
     #elif defined( __SC__ ) || defined( __MRC__ )
       /* Classic MacOS compilers */
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/config/public-macros.h b/src/java.desktop/share/native/libfreetype/include/freetype/config/public-macros.h
    index f56581a6ee7..9f28b394737 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/config/public-macros.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/config/public-macros.h
    @@ -4,7 +4,7 @@
      *
      *   Define a set of compiler macros used in public FreeType headers.
      *
    - * Copyright (C) 2020-2024 by
    + * Copyright (C) 2020-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -62,8 +62,8 @@ FT_BEGIN_HEADER
        * because it is needed by `FT_EXPORT`.
        */
     
    -  /* Visual C, mingw */
    -#if defined( _WIN32 )
    +  /* Visual C, MinGW, Cygwin */
    +#if defined( _WIN32 ) || defined( __CYGWIN__ )
     
     #if defined( FT2_BUILD_LIBRARY ) && defined( DLL_EXPORT )
     #define FT_PUBLIC_FUNCTION_ATTRIBUTE  __declspec( dllexport )
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/freetype.h b/src/java.desktop/share/native/libfreetype/include/freetype/freetype.h
    index 58fc33dfe60..e8a1b1e2f3e 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/freetype.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/freetype.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType high-level API and common types (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -811,7 +811,7 @@ FT_BEGIN_HEADER
        *   FT_ENCODING_MS_SYMBOL ::
        *     Microsoft Symbol encoding, used to encode mathematical symbols and
        *     wingdings.  For more information, see
    -   *     'https://www.microsoft.com/typography/otspec/recom.htm#non-standard-symbol-fonts',
    +   *     'https://learn.microsoft.com/typography/opentype/spec/recom#non-standard-symbol-fonts',
        *     'http://www.kostis.net/charsets/symbol.htm', and
        *     'http://www.kostis.net/charsets/wingding.htm'.
        *
    @@ -1068,12 +1068,12 @@ FT_BEGIN_HEADER
        *     the face in the font file (starting with value~0).  They are set
        *     to~0 if there is only one face in the font file.
        *
    -   *     [Since 2.6.1] Bits 16-30 are relevant to GX and OpenType variation
    -   *     fonts only, holding the named instance index for the current face
    -   *     index (starting with value~1; value~0 indicates font access without
    -   *     a named instance).  For non-variation fonts, bits 16-30 are ignored.
    -   *     If we have the third named instance of face~4, say, `face_index` is
    -   *     set to 0x00030004.
    +   *     [Since 2.6.1] Bits 16-30 are relevant to TrueType GX and OpenType
    +   *     Font Variations only, holding the named instance index for the
    +   *     current face index (starting with value~1; value~0 indicates font
    +   *     access without a named instance).  For non-variation fonts, bits
    +   *     16-30 are ignored.  If we have the third named instance of face~4,
    +   *     say, `face_index` is set to 0x00030004.
        *
        *     Bit 31 is always zero (that is, `face_index` is always a positive
        *     value).
    @@ -1092,10 +1092,10 @@ FT_BEGIN_HEADER
        *     the face; see @FT_STYLE_FLAG_XXX for the details.
        *
        *     [Since 2.6.1] Bits 16-30 hold the number of named instances
    -   *     available for the current face if we have a GX or OpenType variation
    -   *     (sub)font.  Bit 31 is always zero (that is, `style_flags` is always
    -   *     a positive value).  Note that a variation font has always at least
    -   *     one named instance, namely the default instance.
    +   *     available for the current face if we have a TrueType GX or OpenType
    +   *     Font Variation.  Bit 31 is always zero (that is, `style_flags` is
    +   *     always a positive value).  Note that a variation font has always at
    +   *     least one named instance, namely the default instance.
        *
        *   num_glyphs ::
        *     The number of glyphs in the face.  If the face is scalable and has
    @@ -1159,7 +1159,7 @@ FT_BEGIN_HEADER
        *     Note that the bounding box might be off by (at least) one pixel for
        *     hinted fonts.  See @FT_Size_Metrics for further discussion.
        *
    -   *     Note that the bounding box does not vary in OpenType variation fonts
    +   *     Note that the bounding box does not vary in OpenType Font Variations
        *     and should only be used in relation to the default instance.
        *
        *   units_per_EM ::
    @@ -1218,7 +1218,7 @@ FT_BEGIN_HEADER
        *   Fields may be changed after a call to @FT_Attach_File or
        *   @FT_Attach_Stream.
        *
    -   *   For an OpenType variation font, the values of the following fields can
    +   *   For OpenType Font Variations, the values of the following fields can
        *   change after a call to @FT_Set_Var_Design_Coordinates (and friends) if
        *   the font contains an 'MVAR' table: `ascender`, `descender`, `height`,
        *   `underline_position`, and `underline_thickness`.
    @@ -1336,7 +1336,7 @@ FT_BEGIN_HEADER
        *   FT_FACE_FLAG_MULTIPLE_MASTERS ::
        *     The face contains multiple masters and is capable of interpolating
        *     between them.  Supported formats are Adobe MM, TrueType GX, and
    -   *     OpenType variation fonts.
    +   *     OpenType Font Variations.
        *
        *     See section @multiple_masters for API details.
        *
    @@ -1609,7 +1609,7 @@ FT_BEGIN_HEADER
        *
        * @description:
        *   A macro that returns true whenever a face object is a named instance
    -   *   of a GX or OpenType variation font.
    +   *   of a TrueType GX or OpenType Font Variations.
        *
        *   [Since 2.9] Changing the design coordinates with
        *   @FT_Set_Var_Design_Coordinates or @FT_Set_Var_Blend_Coordinates does
    @@ -2147,7 +2147,7 @@ FT_BEGIN_HEADER
        *     freed.
        *
        *     [Since 2.10.1] If @FT_LOAD_NO_SCALE is set, outline coordinates of
    -   *     OpenType variation fonts for a selected instance are internally
    +   *     OpenType Font Variations for a selected instance are internally
        *     handled as 26.6 fractional font units but returned as (rounded)
        *     integers, as expected.  To get unrounded font units, don't use
        *     @FT_LOAD_NO_SCALE but load the glyph with @FT_LOAD_NO_HINTING and
    @@ -2640,14 +2640,14 @@ FT_BEGIN_HEADER
        *     the face in the font file (starting with value~0).  Set it to~0 if
        *     there is only one face in the font file.
        *
    -   *     [Since 2.6.1] Bits 16-30 are relevant to GX and OpenType variation
    -   *     fonts only, specifying the named instance index for the current face
    -   *     index (starting with value~1; value~0 makes FreeType ignore named
    -   *     instances).  For non-variation fonts, bits 16-30 are ignored.
    -   *     Assuming that you want to access the third named instance in face~4,
    -   *     `face_index` should be set to 0x00030004.  If you want to access
    -   *     face~4 without variation handling, simply set `face_index` to
    -   *     value~4.
    +   *     [Since 2.6.1] Bits 16-30 are relevant to TrueType GX and OpenType
    +   *     Font Variations only, specifying the named instance index for the
    +   *     current face index (starting with value~1; value~0 makes FreeType
    +   *     ignore named instances).  For non-variation fonts, bits 16-30 are
    +   *     ignored.  Assuming that you want to access the third named instance
    +   *     in face~4, `face_index` should be set to 0x00030004.  If you want
    +   *     to access face~4 without variation handling, simply set
    +   *     `face_index` to value~4.
        *
        *     `FT_Open_Face` and its siblings can be used to quickly check whether
        *     the font format of a given font resource is supported by FreeType.
    @@ -2914,11 +2914,11 @@ FT_BEGIN_HEADER
        *   of the available glyphs at a given ppem value is available.  FreeType
        *   silently uses outlines if there is no bitmap for a given glyph index.
        *
    -   *   For GX and OpenType variation fonts, a bitmap strike makes sense only
    -   *   if the default instance is active (that is, no glyph variation takes
    -   *   place); otherwise, FreeType simply ignores bitmap strikes.  The same
    -   *   is true for all named instances that are different from the default
    -   *   instance.
    +   *   For TrueType GX and OpenType Font Variations, a bitmap strike makes
    +   *   sense only if the default instance is active (that is, no glyph
    +   *   variation takes place); otherwise, FreeType simply ignores bitmap
    +   *   strikes.  The same is true for all named instances that are different
    +   *   from the default instance.
        *
        *   Don't use this function if you are using the FreeType cache API.
        */
    @@ -3078,7 +3078,7 @@ FT_BEGIN_HEADER
        *   is dependent entirely on how the size is defined in the source face.
        *   The font designer chooses the final size of each glyph relative to
        *   this size.  For more information refer to
    -   *   'https://www.freetype.org/freetype2/docs/glyphs/glyphs-2.html'.
    +   *   'https://freetype.org/freetype2/docs/glyphs/glyphs-2.html'.
        *
        *   Contrary to @FT_Set_Char_Size, this function doesn't have special code
        *   to normalize zero-valued widths, heights, or resolutions, which are
    @@ -3441,8 +3441,10 @@ FT_BEGIN_HEADER
        *     blending of the color glyph layers associated with the glyph index,
        *     using the same bitmap format as embedded color bitmap images.  This
        *     is mainly for convenience and works only for glyphs in 'COLR' v0
    -   *     tables (or glyphs in 'COLR' v1 tables that exclusively use v0
    -   *     features).  For full control of color layers use
    +   *     tables.  **There is no rendering support for 'COLR' v1** (with the
    +   *     exception of v1 tables that exclusively use v0 features)!  You need
    +   *     a graphics library like Skia or Cairo to interpret the graphics
    +   *     commands stored in v1 tables.  For full control of color layers use
        *     @FT_Get_Color_Glyph_Layer and FreeType's color functions like
        *     @FT_Palette_Select instead of setting @FT_LOAD_COLOR for rendering
        *     so that the client application can handle blending by itself.
    @@ -3895,8 +3897,10 @@ FT_BEGIN_HEADER
        *
        *   This process can cost performance.  There is an approximation that
        *   does not need to know about the background color; see
    -   *   https://bel.fi/alankila/lcd/ and
    -   *   https://bel.fi/alankila/lcd/alpcor.html for details.
    +   *   https://web.archive.org/web/20211019204945/https://bel.fi/alankila/lcd/
    +   *   and
    +   *   https://web.archive.org/web/20210211002939/https://bel.fi/alankila/lcd/alpcor.html
    +   *   for details.
        *
        *   **ATTENTION**: Linear blending is even more important when dealing
        *   with subpixel-rendered glyphs to prevent color-fringing!  A
    @@ -3993,13 +3997,13 @@ FT_BEGIN_HEADER
        *   out of the scope of this API function -- they can be implemented
        *   through format-specific interfaces.
        *
    -   *   Note that, for TrueType fonts only, this can extract data from both
    -   *   the 'kern' table and the basic, pair-wise kerning feature from the
    -   *   GPOS table (with `TT_CONFIG_OPTION_GPOS_KERNING` enabled), though
    -   *   FreeType does not support the more advanced GPOS layout features; use
    -   *   a library like HarfBuzz for those instead.  If a font has both a
    -   *   'kern' table and kern features of a GPOS table, the 'kern' table will
    -   *   be used.
    +   *   Note that, for TrueType and OpenType fonts only, this can extract data
    +   *   from both the 'kern' table and the basic, pair-wise kerning feature
    +   *   from the GPOS table (with `TT_CONFIG_OPTION_GPOS_KERNING` enabled),
    +   *   though FreeType does not support the more advanced GPOS layout
    +   *   features; use a library like HarfBuzz for those instead.  If a font
    +   *   has both a 'kern' table and kern features of a GPOS table, the 'kern'
    +   *   table will be used.
        *
        *   Also note for right-to-left scripts, the functionality may differ for
        *   fonts with GPOS tables vs. 'kern' tables.  For GPOS, right-to-left
    @@ -4314,14 +4318,13 @@ FT_BEGIN_HEADER
        *     property `no-stem-darkening` provided by the 'autofit', 'cff',
        *     'type1', and 't1cid' modules; see @no-stem-darkening).
        *
    -   *   * @FT_PARAM_TAG_LCD_FILTER_WEIGHTS (LCD filter weights, corresponding
    -   *     to function @FT_Library_SetLcdFilterWeights).
    -   *
        *   * @FT_PARAM_TAG_RANDOM_SEED (seed value for the CFF, Type~1, and CID
        *     'random' operator, corresponding to the `random-seed` property
        *     provided by the 'cff', 'type1', and 't1cid' modules; see
        *     @random-seed).
        *
    +   *   * @FT_PARAM_TAG_LCD_FILTER_WEIGHTS (no longer supported).
    +   *
        *   Pass `NULL` as `data` in @FT_Parameter for a given tag to reset the
        *   option and use the library or module default again.
        *
    @@ -4348,25 +4351,17 @@ FT_BEGIN_HEADER
        *     FT_Bool              darken_stems = 1;
        *
        *     FT_Parameter         property2;
    -   *     FT_LcdFiveTapFilter  custom_weight =
    -   *                            { 0x11, 0x44, 0x56, 0x44, 0x11 };
    -   *
    -   *     FT_Parameter         property3;
        *     FT_Int32             random_seed = 314159265;
        *
    -   *     FT_Parameter         properties[3] = { property1,
    -   *                                            property2,
    -   *                                            property3 };
    +   *     FT_Parameter         properties[2] = { property1,
    +   *                                            property2 };
        *
        *
        *     property1.tag  = FT_PARAM_TAG_STEM_DARKENING;
        *     property1.data = &darken_stems;
        *
    -   *     property2.tag  = FT_PARAM_TAG_LCD_FILTER_WEIGHTS;
    -   *     property2.data = custom_weight;
    -   *
    -   *     property3.tag  = FT_PARAM_TAG_RANDOM_SEED;
    -   *     property3.data = &random_seed;
    +   *     property2.tag  = FT_PARAM_TAG_RANDOM_SEED;
    +   *     property2.data = &random_seed;
        *
        *     FT_Face_Properties( face, 3, properties );
        *   ```
    @@ -4377,7 +4372,7 @@ FT_BEGIN_HEADER
        *     FT_Parameter  property;
        *
        *
    -   *     property.tag  = FT_PARAM_TAG_LCD_FILTER_WEIGHTS;
    +   *     property.tag  = FT_PARAM_TAG_STEM_DARKENING;
        *     property.data = NULL;
        *
        *     FT_Face_Properties( face, 1, &property );
    @@ -4530,7 +4525,7 @@ FT_BEGIN_HEADER
        *   table description in the OpenType specification for the meaning of the
        *   various flags (which get synthesized for non-OpenType subglyphs).
        *
    -   *     https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#composite-glyph-description
    +   *     https://learn.microsoft.com/typography/opentype/spec/glyf#composite-glyph-description
        *
        * @values:
        *   FT_SUBGLYPH_FLAG_ARGS_ARE_WORDS ::
    @@ -4593,7 +4588,7 @@ FT_BEGIN_HEADER
        *   interpreted depending on the flags returned in `*p_flags`.  See the
        *   OpenType specification for details.
        *
    -   *     https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#composite-glyph-description
    +   *     https://learn.microsoft.com/typography/opentype/spec/glyf#composite-glyph-description
        *
        */
       FT_EXPORT( FT_Error )
    @@ -4619,7 +4614,7 @@ FT_BEGIN_HEADER
        *   associated with a font.
        *
        *   See
    -   *   https://www.adobe.com/content/dam/Adobe/en/devnet/acrobat/pdfs/FontPolicies.pdf
    +   *   https://adobe-type-tools.github.io/font-tech-notes/pdfs/AcrobatDC_FontPolicies.pdf
        *   for more details.
        *
        * @values:
    @@ -5173,8 +5168,8 @@ FT_BEGIN_HEADER
        *
        */
     #define FREETYPE_MAJOR  2
    -#define FREETYPE_MINOR  13
    -#define FREETYPE_PATCH  3
    +#define FREETYPE_MINOR  14
    +#define FREETYPE_PATCH  2
     
     
       /**************************************************************************
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftadvanc.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftadvanc.h
    index 85b8ba2554b..62a856ccbd7 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftadvanc.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftadvanc.h
    @@ -4,7 +4,7 @@
      *
      *   Quick computation of advance widths (specification only).
      *
    - * Copyright (C) 2008-2024 by
    + * Copyright (C) 2008-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftbbox.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftbbox.h
    index 12bbfa63a62..348b4b3a268 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftbbox.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftbbox.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType exact bbox computation (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftbdf.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftbdf.h
    index 6f63b0b1e78..ab142249217 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftbdf.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftbdf.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for accessing BDF-specific strings (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -44,7 +44,8 @@ FT_BEGIN_HEADER
        *
        * @description:
        *   This section contains the declaration of functions specific to BDF and
    -   *   PCF fonts.
    +   *   PCF fonts.  They also work for SFNT bitmap fonts that contain a 'BDF~'
    +   *   table like X11's `.otb` fonts.
        *
        */
     
    @@ -151,7 +152,9 @@ FT_BEGIN_HEADER
        *   FreeType error code.  0~means success.
        *
        * @note:
    -   *   This function only works with BDF faces, returning an error otherwise.
    +   *   This function only works with BDF faces and SFNT fonts that have a
    +   *   'BDF~' table, returning an error otherwise.  For the latter, a bitmap
    +   *   strike size must be selected first.
        */
       FT_EXPORT( FT_Error )
       FT_Get_BDF_Charset_ID( FT_Face       face,
    @@ -165,7 +168,7 @@ FT_BEGIN_HEADER
        *    FT_Get_BDF_Property
        *
        * @description:
    -   *    Retrieve a BDF property from a BDF or PCF font file.
    +   *    Retrieve a BDF property from a BDF or PCF font.
        *
        * @input:
        *    face ::
    @@ -196,6 +199,9 @@ FT_BEGIN_HEADER
        *
        *   In case of error, `aproperty->type` is always set to
        *   @BDF_PROPERTY_TYPE_NONE.
    +   *
    +   *   This also works with SFNT fonts that have a 'BDF~' table, after a
    +   *   bitmap strike size has been selected.
        */
       FT_EXPORT( FT_Error )
       FT_Get_BDF_Property( FT_Face           face,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftbitmap.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftbitmap.h
    index df9d462652e..a22d43adf14 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftbitmap.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftbitmap.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType utility functions for bitmaps (specification).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftcid.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftcid.h
    index 96b2a90fc59..7cda8ff3f39 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftcid.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftcid.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for accessing CID font information (specification).
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * Dereg Clegg and Michael Toftdal.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftcolor.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftcolor.h
    index 420720ddf22..129b1a23fb0 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftcolor.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftcolor.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType's glyph color management (specification).
      *
    - * Copyright (C) 2018-2024 by
    + * Copyright (C) 2018-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -317,6 +317,15 @@ FT_BEGIN_HEADER
        * @description:
        *   The functions described here allow access of colored glyph layer data
        *   in OpenType's 'COLR' tables.
    +   *
    +   *   Note that FreeType does *not* provide rendering in general of glyphs
    +   *   that use a 'COLR' table!  While FreeType has very limited rendering
    +   *   support for 'COLR' v0 tables (without a possibility to change the
    +   *   color palette) via @FT_Render_Glyph, there is no such convenience
    +   *   code for 'COLR' v1 tables -- while it appears that v1 is simply an
    +   *   'improved' version of v0, this is not the case: it is a completely
    +   *   different color font format, and you need a dedicated graphics
    +   *   library like Skia or Cairo to handle a v1 table's drawing commands.
        */
     
     
    @@ -359,7 +368,7 @@ FT_BEGIN_HEADER
        *   iteratively retrieve the colored glyph layers associated with the
        *   current glyph slot.
        *
    -   *     https://docs.microsoft.com/en-us/typography/opentype/spec/colr
    +   *     https://learn.microsoft.com/typography/opentype/spec/colr
        *
        *   The glyph layer data for a given glyph index, if present, provides an
        *   alternative, multi-color glyph representation: Instead of rendering
    @@ -1518,7 +1527,7 @@ FT_BEGIN_HEADER
        *
        * @return:
        *   Value~1 if a clip box is found.  If no clip box is found or an error
    -   *   occured, value~0 is returned.
    +   *   occurred, value~0 is returned.
        *
        * @note:
        *   To retrieve the clip box in font units, reset scale to units-per-em
    @@ -1646,7 +1655,7 @@ FT_BEGIN_HEADER
        *
        * @return:
        *   Value~1 if everything is OK.  Value~0 if no details can be found for
    -   *   this paint or any other error occured.
    +   *   this paint or any other error occurred.
        *
        * @since:
        *   2.13
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftdriver.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftdriver.h
    index 1b7f539f5e2..b65a06ab69b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftdriver.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftdriver.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for controlling driver modules (specification only).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -282,7 +282,7 @@ FT_BEGIN_HEADER
        *   minimize hinting techniques that were problematic with the extra
        *   resolution of ClearType; see
        *   http://rastertragedy.com/RTRCh4.htm#Sec1 and
    -   *   https://www.microsoft.com/typography/cleartype/truetypecleartype.aspx.
    +   *   https://learn.microsoft.com/typography/cleartype/truetypecleartype.
        *   This technique is not to be confused with ClearType compatible widths.
        *   ClearType backward compatibility has no direct impact on changing
        *   advance widths, but there might be an indirect impact on disabling
    @@ -784,7 +784,7 @@ FT_BEGIN_HEADER
        *
        *   Details on subpixel hinting and some of the necessary tweaks can be
        *   found in Greg Hitchcock's whitepaper at
    -   *   'https://www.microsoft.com/typography/cleartype/truetypecleartype.aspx'.
    +   *   'https://learn.microsoft.com/typography/cleartype/truetypecleartype'.
        *   Note that FreeType currently doesn't really 'subpixel hint' (6x1, 6x2,
        *   or 6x5 supersampling) like discussed in the paper.  Depending on the
        *   chosen interpreter, it simply ignores instructions on vertical stems
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/fterrdef.h b/src/java.desktop/share/native/libfreetype/include/freetype/fterrdef.h
    index 710ca91bbdd..3e591bede8d 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/fterrdef.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/fterrdef.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType error codes (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/fterrors.h b/src/java.desktop/share/native/libfreetype/include/freetype/fterrors.h
    index 27c0ece5c1c..eca494f90c0 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/fterrors.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/fterrors.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType error code handling (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftfntfmt.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftfntfmt.h
    index 7c8b0874a81..5df82447d0e 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftfntfmt.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftfntfmt.h
    @@ -4,7 +4,7 @@
      *
      *   Support functions for font formats.
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftgasp.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftgasp.h
    index 30e5a9bf82b..77e5a7e7bfd 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftgasp.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftgasp.h
    @@ -4,7 +4,7 @@
      *
      *   Access of TrueType's 'gasp' table (specification).
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftglyph.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftglyph.h
    index dc1eb8873ae..3691781cf52 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftglyph.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftglyph.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType convenience functions to handle glyphs (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftgzip.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftgzip.h
    index 9516dc030ac..e26c334c11a 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftgzip.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftgzip.h
    @@ -4,7 +4,7 @@
      *
      *   Gzip-compressed stream support.
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftimage.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftimage.h
    index 2b4b4ac60ae..a4dc724f349 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftimage.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftimage.h
    @@ -5,7 +5,7 @@
      *   FreeType glyph image formats and default raster interface
      *   (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -267,6 +267,10 @@ FT_BEGIN_HEADER
        *   *logical* one.  For example, if @FT_Pixel_Mode is set to
        *   `FT_PIXEL_MODE_LCD`, the logical width is a just a third of the
        *   physical one.
    +   *
    +   *   An empty bitmap with a NULL `buffer` is valid, with `rows` and/or
    +   *   `pitch` also set to 0.  Such bitmaps might be produced while rendering
    +   *   empty or degenerate outlines.
        */
       typedef struct  FT_Bitmap_
       {
    @@ -439,7 +443,7 @@ FT_BEGIN_HEADER
        *   rasterizer; see the `tags` field in @FT_Outline.
        *
        *   Please refer to the description of the 'SCANTYPE' instruction in the
    -   *   [OpenType specification](https://learn.microsoft.com/en-us/typography/opentype/spec/tt_instructions#scantype)
    +   *   [OpenType specification](https://learn.microsoft.com/typography/opentype/spec/tt_instructions#scantype)
        *   how simple drop-outs, smart drop-outs, and stubs are defined.
        */
     #define FT_OUTLINE_NONE             0x0
    @@ -871,7 +875,7 @@ FT_BEGIN_HEADER
        */
       typedef struct  FT_Span_
       {
    -    short           x;
    +    unsigned short  x;
         unsigned short  len;
         unsigned char   coverage;
     
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftincrem.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftincrem.h
    index 816581b78eb..2233044754e 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftincrem.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftincrem.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType incremental loading (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftlcdfil.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftlcdfil.h
    index 25274dc4ac2..37bb5e1b8fb 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftlcdfil.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftlcdfil.h
    @@ -5,7 +5,7 @@
      *   FreeType API for color filtering of subpixel bitmap glyphs
      *   (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -145,16 +145,10 @@ FT_BEGIN_HEADER
        *
        *   FT_LCD_FILTER_LEGACY ::
        *   FT_LCD_FILTER_LEGACY1 ::
    -   *     This filter corresponds to the original libXft color filter.  It
    -   *     provides high contrast output but can exhibit really bad color
    -   *     fringes if glyphs are not extremely well hinted to the pixel grid.
    -   *     This filter is only provided for comparison purposes, and might be
    -   *     disabled or stay unsupported in the future. The second value is
    -   *     provided for compatibility with FontConfig, which historically used
    -   *     different enumeration, sometimes incorrectly forwarded to FreeType.
    +   *     The legacy libXft color filter is no longer supported and ignored.
        *
        * @since:
    -   *   2.3.0 (`FT_LCD_FILTER_LEGACY1` since 2.6.2)
    +   *   2.3.0
        */
       typedef enum  FT_LcdFilter_
       {
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftlist.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftlist.h
    index 972fbfa2fe4..14958b0ff37 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftlist.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftlist.h
    @@ -4,7 +4,7 @@
      *
      *   Generic list support for FreeType (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftlogging.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftlogging.h
    index 1813cfc2c27..d155171136c 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftlogging.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftlogging.h
    @@ -4,7 +4,7 @@
      *
      *   Additional debugging APIs.
      *
    - * Copyright (C) 2020-2024 by
    + * Copyright (C) 2020-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftmac.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftmac.h
    index e4efde33dd8..c5ac49101a4 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftmac.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftmac.h
    @@ -4,7 +4,7 @@
      *
      *   Additional Mac-specific API.
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * Just van Rossum, David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftmm.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftmm.h
    index 35ed039c89b..ff0bbab59f9 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftmm.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftmm.h
    @@ -2,9 +2,9 @@
      *
      * ftmm.h
      *
    - *   FreeType Multiple Master font interface (specification).
    + *   FreeType variation font interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -37,24 +37,79 @@ FT_BEGIN_HEADER
        *   multiple_masters
        *
        * @title:
    -   *   Multiple Masters
    +   *   OpenType Font Variations, TrueType GX, and Adobe MM Fonts
        *
        * @abstract:
    -   *   How to manage Multiple Masters fonts.
    +   *   How to manage variable fonts with multiple design axes.
        *
        * @description:
    -   *   The following types and functions are used to manage Multiple Master
    -   *   fonts, i.e., the selection of specific design instances by setting
    -   *   design axis coordinates.
    +   *   The following types and functions manage OpenType Font Variations,
    +   *   Adobe Multiple Master (MM) fonts, and Apple TrueType GX fonts.  These
    +   *   formats have in common that they allow the selection of specific
    +   *   design instances by setting design coordinates for one or more axes
    +   *   like font weight or width.
        *
    -   *   Besides Adobe MM fonts, the interface supports Apple's TrueType GX and
    -   *   OpenType variation fonts.  Some of the routines only work with Adobe
    -   *   MM fonts, others will work with all three types.  They are similar
    -   *   enough that a consistent interface makes sense.
    +   *   For historical reasons there are two interfaces.  The first, older one
    +   *   can be used with Adobe MM fonts only, and the second, newer one is a
    +   *   unified interface that handles all three font formats.  However, some
    +   *   differences remain and are documented accordingly; in particular,
    +   *   Adobe MM fonts don't have named instances (see below).
        *
    -   *   For Adobe MM fonts, macro @FT_IS_SFNT returns false.  For GX and
    -   *   OpenType variation fonts, it returns true.
    +   *   For Adobe MM fonts, macro @FT_IS_SFNT returns false.  For TrueType GX
    +   *   and OpenType Font Variations, it returns true.
        *
    +   *   We use mostly the terminology of the OpenType standard.  Here are some
    +   *   important technical terms.
    +   *
    +   *   * A 'named instance' is a tuple of design coordinates that has a
    +   *     string ID (i.e., an index into the font's 'name' table) associated
    +   *     with it.  The font can tell the user that, for example,
    +   *     [Weight=700,Width=110] is 'Bold'.  Another name for 'named instance'
    +   *     is 'named style'.
    +   *
    +   *       Adobe MM fonts don't have named instances.
    +   *
    +   *   * The 'default instance' of a variation font is that instance for
    +   *     which the nth axis coordinate is equal to the nth default axis
    +   *     coordinate (i.e., `axis[n].def` as specified in the @FT_MM_Var
    +   *     structure), with~n covering all axes.  In TrueType GX and OpenType
    +   *     Font Variations, the default instance is explicitly given.  In Adobe
    +   *     MM fonts, the `WeightVector` entry as found in the font file is
    +   *     taken as the default instance.
    +   *
    +   *       For TrueType GX and OpenType Font Variations, FreeType synthesizes
    +   *       a named instance for the default instance if the font does not
    +   *       contain such an entry.
    +   *
    +   *   * 'Design coordinates' are the axis values found in a variation font
    +   *      file.  Their meaning is specified by the font designer and the
    +   *      values are rather arbitrary.
    +   *
    +   *       For example, the 'weight' axis in design coordinates might vary
    +   *       between 100 (thin) and 900 (heavy) in font~A, while font~B
    +   *       contains values between 400 (normal) and 800 (extra bold).
    +   *
    +   *   * 'Normalized coordinates' are design coordinates mapped to a standard
    +   *     range; they are also called 'blend coordinates'.
    +   *
    +   *       For TrueType GX and OpenType Font Variations, the range is [-1;1],
    +   *       with the minimum mapped to value~-1, the default mapped to
    +   *       value~0, and the maximum mapped to value~1, and all other
    +   *       coordinates mapped to intervening points.  Please look up the
    +   *       [OpenType
    +   *       specification](https://learn.microsoft.com/en-us/typography/opentype/spec/otvaroverview)
    +   *       on how this mapping works in detail.
    +   *
    +   *       For Adobe MM fonts, this standard range is [0;1], with the minimum
    +   *       mapped to value~0 and the maximum mapped to value~1, and all other
    +   *       coordinates mapped to intervening points.  Please look up [Adobe
    +   *       TechNote
    +   *       #5015](https://adobe-type-tools.github.io/font-tech-notes/pdfs/5015.Type1_Supp.pdf)
    +   *       on how this mapping works in detail.
    +   *
    +   *       Assuming that the two fonts in the previous example are OpenType
    +   *       Font Variations, both font~A's [100;900] and font~B's [400;800]
    +   *       coordinate ranges get mapped to [-1;1].
        */
     
     
    @@ -64,14 +119,14 @@ FT_BEGIN_HEADER
        *   T1_MAX_MM_XXX
        *
        * @description:
    -   *   Multiple Masters limits as defined in their specifications.
    +   *   Adobe MM font limits as defined in their specifications.
        *
        * @values:
        *   T1_MAX_MM_AXIS ::
    -   *     The maximum number of Multiple Masters axes.
    +   *     The maximum number of Adobe MM font axes.
        *
        *   T1_MAX_MM_DESIGNS ::
    -   *     The maximum number of Multiple Masters designs.
    +   *     The maximum number of Adobe MM font designs.
        *
        *   T1_MAX_MM_MAP_POINTS ::
        *     The maximum number of elements in a design map.
    @@ -88,11 +143,10 @@ FT_BEGIN_HEADER
        *   FT_MM_Axis
        *
        * @description:
    -   *   A structure to model a given axis in design space for Multiple Masters
    -   *   fonts.
    +   *   A structure to model a given axis in design space for Adobe MM fonts.
        *
    -   *   This structure can't be used for TrueType GX or OpenType variation
    -   *   fonts.
    +   *   This structure can't be used with TrueType GX or OpenType Font
    +   *   Variations.
        *
        * @fields:
        *   name ::
    @@ -119,17 +173,17 @@ FT_BEGIN_HEADER
        *   FT_Multi_Master
        *
        * @description:
    -   *   A structure to model the axes and space of a Multiple Masters font.
    +   *   A structure to model the axes and space of an Adobe MM font.
        *
    -   *   This structure can't be used for TrueType GX or OpenType variation
    -   *   fonts.
    +   *   This structure can't be used with TrueType GX or OpenType Font
    +   *   Variations.
        *
        * @fields:
        *   num_axis ::
        *     Number of axes.  Cannot exceed~4.
        *
        *   num_designs ::
    -   *     Number of designs; should be normally 2^num_axis even though the
    +   *     Number of designs; should be normally `2^num_axis` even though the
        *     Type~1 specification strangely allows for intermediate designs to be
        *     present.  This number cannot exceed~16.
        *
    @@ -151,13 +205,13 @@ FT_BEGIN_HEADER
        *   FT_Var_Axis
        *
        * @description:
    -   *   A structure to model a given axis in design space for Multiple
    -   *   Masters, TrueType GX, and OpenType variation fonts.
    +   *   A structure to model a given axis in design space for Adobe MM fonts,
    +   *   TrueType GX, and OpenType Font Variations.
        *
        * @fields:
        *   name ::
        *     The axis's name.  Not always meaningful for TrueType GX or OpenType
    -   *     variation fonts.
    +   *     Font Variations.
        *
        *   minimum ::
        *     The axis's minimum design coordinate.
    @@ -171,17 +225,17 @@ FT_BEGIN_HEADER
        *
        *   tag ::
        *     The axis's tag (the equivalent to 'name' for TrueType GX and
    -   *     OpenType variation fonts).  FreeType provides default values for
    +   *     OpenType Font Variations).  FreeType provides default values for
        *     Adobe MM fonts if possible.
        *
        *   strid ::
        *     The axis name entry in the font's 'name' table.  This is another
        *     (and often better) version of the 'name' field for TrueType GX or
    -   *     OpenType variation fonts.  Not meaningful for Adobe MM fonts.
    +   *     OpenType Font Variations.  Not meaningful for Adobe MM fonts.
        *
        * @note:
        *   The fields `minimum`, `def`, and `maximum` are 16.16 fractional values
    -   *   for TrueType GX and OpenType variation fonts.  For Adobe MM fonts, the
    +   *   for TrueType GX and OpenType Font Variations.  For Adobe MM fonts, the
        *   values are whole numbers (i.e., the fractional part is zero).
        */
       typedef struct  FT_Var_Axis_
    @@ -205,7 +259,7 @@ FT_BEGIN_HEADER
        *
        * @description:
        *   A structure to model a named instance in a TrueType GX or OpenType
    -   *   variation font.
    +   *   Font Variations.
        *
        *   This structure can't be used for Adobe MM fonts.
        *
    @@ -215,11 +269,11 @@ FT_BEGIN_HEADER
        *     entry for each axis.
        *
        *   strid ::
    -   *     The entry in 'name' table identifying this instance.
    +   *     An index into the 'name' table identifying this instance.
        *
        *   psid ::
    -   *     The entry in 'name' table identifying a PostScript name for this
    -   *     instance.  Value 0xFFFF indicates a missing entry.
    +   *     An index into the 'name' table identifying a PostScript name for
    +   *     this instance.  Value 0xFFFF indicates a missing entry.
        */
       typedef struct  FT_Var_Named_Style_
       {
    @@ -236,39 +290,33 @@ FT_BEGIN_HEADER
        *   FT_MM_Var
        *
        * @description:
    -   *   A structure to model the axes and space of an Adobe MM, TrueType GX,
    -   *   or OpenType variation font.
    +   *   A structure to model the axes and space of Adobe MM fonts, TrueType
    +   *   GX, or OpenType Font Variations.
        *
        *   Some fields are specific to one format and not to the others.
        *
        * @fields:
        *   num_axis ::
        *     The number of axes.  The maximum value is~4 for Adobe MM fonts; no
    -   *     limit in TrueType GX or OpenType variation fonts.
    +   *     limit in TrueType GX or OpenType Font Variations.
        *
        *   num_designs ::
    -   *     The number of designs; should be normally 2^num_axis for Adobe MM
    -   *     fonts.  Not meaningful for TrueType GX or OpenType variation fonts
    +   *     The number of designs; should be normally `2^num_axis` for Adobe MM
    +   *     fonts.  Not meaningful for TrueType GX or OpenType Font Variations
        *     (where every glyph could have a different number of designs).
        *
        *   num_namedstyles ::
    -   *     The number of named styles; a 'named style' is a tuple of design
    -   *     coordinates that has a string ID (in the 'name' table) associated
    -   *     with it.  The font can tell the user that, for example,
    -   *     [Weight=1.5,Width=1.1] is 'Bold'.  Another name for 'named style' is
    -   *     'named instance'.
    -   *
    -   *     For Adobe Multiple Masters fonts, this value is always zero because
    -   *     the format does not support named styles.
    +   *     The number of named instances.  For Adobe MM fonts, this value is
    +   *     always zero.
        *
        *   axis ::
    -   *     An axis descriptor table.  TrueType GX and OpenType variation fonts
    +   *     An axis descriptor table.  TrueType GX and OpenType Font Variations
        *     contain slightly more data than Adobe MM fonts.  Memory management
        *     of this pointer is done internally by FreeType.
        *
        *   namedstyle ::
    -   *     A named style (instance) table.  Only meaningful for TrueType GX and
    -   *     OpenType variation fonts.  Memory management of this pointer is done
    +   *     An array of named instances.  Only meaningful for TrueType GX and
    +   *     OpenType Font Variations.  Memory management of this pointer is done
        *     internally by FreeType.
        */
       typedef struct  FT_MM_Var_
    @@ -290,8 +338,8 @@ FT_BEGIN_HEADER
        * @description:
        *   Retrieve a variation descriptor of a given Adobe MM font.
        *
    -   *   This function can't be used with TrueType GX or OpenType variation
    -   *   fonts.
    +   *   This function can't be used with TrueType GX or OpenType Font
    +   *   Variations.
        *
        * @input:
        *   face ::
    @@ -299,7 +347,7 @@ FT_BEGIN_HEADER
        *
        * @output:
        *   amaster ::
    -   *     The Multiple Masters descriptor.
    +   *     The Adobe MM font's variation descriptor.
        *
        * @return:
        *   FreeType error code.  0~means success.
    @@ -366,8 +414,8 @@ FT_BEGIN_HEADER
        *   For Adobe MM fonts, choose an interpolated font design through design
        *   coordinates.
        *
    -   *   This function can't be used with TrueType GX or OpenType variation
    -   *   fonts.
    +   *   This function can't be used with TrueType GX or OpenType Font
    +   *   Variations.
        *
        * @inout:
        *   face ::
    @@ -391,8 +439,8 @@ FT_BEGIN_HEADER
        *
        *   [Since 2.9] If `num_coords` is larger than zero, this function sets
        *   the @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field
    -   *   (i.e., @FT_IS_VARIATION will return true).  If `num_coords` is zero,
    -   *   this bit flag gets unset.
    +   *   (i.e., @FT_IS_VARIATION returns true).  If `num_coords` is zero, this
    +   *   bit flag gets unset.
        */
       FT_EXPORT( FT_Error )
       FT_Set_MM_Design_Coordinates( FT_Face   face,
    @@ -428,7 +476,7 @@ FT_BEGIN_HEADER
        *
        * @note:
        *   The design coordinates are 16.16 fractional values for TrueType GX and
    -   *   OpenType variation fonts.  For Adobe MM fonts, the values are supposed
    +   *   OpenType Font Variations.  For Adobe MM fonts, the values are supposed
        *   to be whole numbers (i.e., the fractional part is zero).
        *
        *   [Since 2.8.1] To reset all axes to the default values, call the
    @@ -438,8 +486,14 @@ FT_BEGIN_HEADER
        *
        *   [Since 2.9] If `num_coords` is larger than zero, this function sets
        *   the @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field
    -   *   (i.e., @FT_IS_VARIATION will return true).  If `num_coords` is zero,
    -   *   this bit flag gets unset.
    +   *   (i.e., @FT_IS_VARIATION returns true).  If `num_coords` is zero, this
    +   *   bit flag gets unset.
    +   *
    +   *   [Since 2.14] This function also sets the @FT_FACE_FLAG_VARIATION bit
    +   *   in @FT_Face's `face_flags` field (i.e., @FT_IS_VARIATION returns
    +   *   true) if any of the provided coordinates is different from the face's
    +   *   default value for the corresponding axis, that is, the set up face is
    +   *   not at its default position.
        */
       FT_EXPORT( FT_Error )
       FT_Set_Var_Design_Coordinates( FT_Face    face,
    @@ -468,14 +522,14 @@ FT_BEGIN_HEADER
        *
        * @output:
        *   coords ::
    -   *     The design coordinates array.
    +   *     The design coordinates array, which must be allocated by the user.
        *
        * @return:
        *   FreeType error code.  0~means success.
        *
        * @note:
        *   The design coordinates are 16.16 fractional values for TrueType GX and
    -   *   OpenType variation fonts.  For Adobe MM fonts, the values are whole
    +   *   OpenType Font Variations.  For Adobe MM fonts, the values are whole
        *   numbers (i.e., the fractional part is zero).
        *
        * @since:
    @@ -493,8 +547,7 @@ FT_BEGIN_HEADER
        *   FT_Set_MM_Blend_Coordinates
        *
        * @description:
    -   *   Choose an interpolated font design through normalized blend
    -   *   coordinates.
    +   *   Choose an interpolated font design through normalized coordinates.
        *
        *   This function works with all supported variation formats.
        *
    @@ -509,9 +562,10 @@ FT_BEGIN_HEADER
        *     the number of axes, use default values for the remaining axes.
        *
        *   coords ::
    -   *     The design coordinates array.  Each element is a 16.16 fractional
    -   *     value and must be between 0 and 1.0 for Adobe MM fonts, and between
    -   *     -1.0 and 1.0 for TrueType GX and OpenType variation fonts.
    +   *     The normalized coordinates array.  Each element is a 16.16
    +   *     fractional value and must be between 0 and 1.0 for Adobe MM fonts,
    +   *     and between -1.0 and 1.0 for TrueType GX and OpenType Font
    +   *     Variations.
        *
        * @return:
        *   FreeType error code.  0~means success.
    @@ -524,8 +578,14 @@ FT_BEGIN_HEADER
        *
        *   [Since 2.9] If `num_coords` is larger than zero, this function sets
        *   the @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field
    -   *   (i.e., @FT_IS_VARIATION will return true).  If `num_coords` is zero,
    -   *   this bit flag gets unset.
    +   *   (i.e., @FT_IS_VARIATION returns true).  If `num_coords` is zero, this
    +   *   bit flag gets unset.
    +   *
    +   *   [Since 2.14] This function also sets the @FT_FACE_FLAG_VARIATION bit
    +   *   in @FT_Face's `face_flags` field (i.e., @FT_IS_VARIATION returns
    +   *   true) if any of the provided coordinates is different from the face's
    +   *   default value for the corresponding axis, that is, the set up face is
    +   *   not at its default position.
        */
       FT_EXPORT( FT_Error )
       FT_Set_MM_Blend_Coordinates( FT_Face    face,
    @@ -539,8 +599,8 @@ FT_BEGIN_HEADER
        *   FT_Get_MM_Blend_Coordinates
        *
        * @description:
    -   *   Get the normalized blend coordinates of the currently selected
    -   *   interpolated font.
    +   *   Get the normalized coordinates of the currently selected interpolated
    +   *   font.
        *
        *   This function works with all supported variation formats.
        *
    @@ -549,14 +609,14 @@ FT_BEGIN_HEADER
        *     A handle to the source face.
        *
        *   num_coords ::
    -   *     The number of normalized blend coordinates to retrieve.  If it is
    -   *     larger than the number of axes, set the excess values to~0.5 for
    -   *     Adobe MM fonts, and to~0 for TrueType GX and OpenType variation
    -   *     fonts.
    +   *     The number of normalized coordinates to retrieve.  If it is larger
    +   *     than the number of axes, set the excess values to~0.5 for Adobe MM
    +   *     fonts, and to~0 for TrueType GX and OpenType Font Variations.
        *
        * @output:
        *   coords ::
    -   *     The normalized blend coordinates array (as 16.16 fractional values).
    +   *     The normalized coordinates array (as 16.16 fractional values), which
    +   *     must be allocated by the user.
        *
        * @return:
        *   FreeType error code.  0~means success.
    @@ -610,8 +670,8 @@ FT_BEGIN_HEADER
        *   For Adobe MM fonts, choose an interpolated font design by directly
        *   setting the weight vector.
        *
    -   *   This function can't be used with TrueType GX or OpenType variation
    -   *   fonts.
    +   *   This function can't be used with TrueType GX or OpenType Font
    +   *   Variations.
        *
        * @inout:
        *   face ::
    @@ -630,16 +690,16 @@ FT_BEGIN_HEADER
        *   FreeType error code.  0~means success.
        *
        * @note:
    -   *   Adobe Multiple Master fonts limit the number of designs, and thus the
    -   *   length of the weight vector to 16~elements.
    +   *   Adobe MM fonts limit the number of designs, and thus the length of the
    +   *   weight vector, to 16~elements.
        *
        *   If `len` is larger than zero, this function sets the
        *   @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field (i.e.,
    -   *   @FT_IS_VARIATION will return true).  If `len` is zero, this bit flag
    -   *   is unset and the weight vector array is reset to the default values.
    +   *   @FT_IS_VARIATION returns true).  If `len` is zero, this bit flag is
    +   *   unset and the weight vector array is reset to the default values.
        *
        *   The Adobe documentation also states that the values in the
    -   *   WeightVector array must total 1.0 +/-~0.001.  In practice this does
    +   *   `WeightVector` array must total 1.0 +/-~0.001.  In practice this does
        *   not seem to be enforced, so is not enforced here, either.
        *
        * @since:
    @@ -659,8 +719,8 @@ FT_BEGIN_HEADER
        * @description:
        *   For Adobe MM fonts, retrieve the current weight vector of the font.
        *
    -   *   This function can't be used with TrueType GX or OpenType variation
    -   *   fonts.
    +   *   This function can't be used with TrueType GX or OpenType Font
    +   *   Variations.
        *
        * @inout:
        *   face ::
    @@ -677,14 +737,14 @@ FT_BEGIN_HEADER
        *
        * @output:
        *   weightvector ::
    -   *     An array to be filled.
    +   *     An array to be filled; it must be allocated by the user.
        *
        * @return:
        *   FreeType error code.  0~means success.
        *
        * @note:
    -   *   Adobe Multiple Master fonts limit the number of designs, and thus the
    -   *   length of the WeightVector to~16.
    +   *   Adobe MM fonts limit the number of designs, and thus the length of the
    +   *   weight vector, to~16 elements.
        *
        * @since:
        *   2.10
    @@ -760,8 +820,8 @@ FT_BEGIN_HEADER
        *     A handle to the source face.
        *
        *   instance_index ::
    -   *     The index of the requested instance, starting with value 1.  If set
    -   *     to value 0, FreeType switches to font access without a named
    +   *     The index of the requested instance, starting with value~1.  If set
    +   *     to value~0, FreeType switches to font access without a named
        *     instance.
        *
        * @return:
    @@ -771,11 +831,11 @@ FT_BEGIN_HEADER
        *   The function uses the value of `instance_index` to set bits 16-30 of
        *   the face's `face_index` field.  It also resets any variation applied
        *   to the font, and the @FT_FACE_FLAG_VARIATION bit of the face's
    -   *   `face_flags` field gets reset to zero (i.e., @FT_IS_VARIATION will
    -   *   return false).
    +   *   `face_flags` field gets reset to zero (i.e., @FT_IS_VARIATION returns
    +   *   false).
        *
    -   *   For Adobe MM fonts (which don't have named instances) this function
    -   *   simply resets the current face to the default instance.
    +   *   For Adobe MM fonts, this function resets the current face to the
    +   *   default instance.
        *
        * @since:
        *   2.9
    @@ -794,10 +854,6 @@ FT_BEGIN_HEADER
        *   Retrieve the index of the default named instance, to be used with
        *   @FT_Set_Named_Instance.
        *
    -   *   The default instance of a variation font is that instance for which
    -   *   the nth axis coordinate is equal to `axis[n].def` (as specified in the
    -   *   @FT_MM_Var structure), with~n covering all axes.
    -   *
        *   FreeType synthesizes a named instance for the default instance if the
        *   font does not contain such an entry.
        *
    @@ -813,8 +869,8 @@ FT_BEGIN_HEADER
        *   FreeType error code.  0~means success.
        *
        * @note:
    -   *   For Adobe MM fonts (which don't have named instances) this function
    -   *   always returns zero for `instance_index`.
    +   *   For Adobe MM fonts, this function always returns zero for
    +   *   `instance_index`.
        *
        * @since:
        *   2.13.1
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftmodapi.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftmodapi.h
    index 0ee715898f7..2669e4a03b3 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftmodapi.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftmodapi.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType modules public interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftmoderr.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftmoderr.h
    index 6722fbf8b70..8e2ef2f01f8 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftmoderr.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftmoderr.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType module error offsets (specification).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftoutln.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftoutln.h
    index 44e94b4f5bb..2545ca8486b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftoutln.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftoutln.h
    @@ -5,7 +5,7 @@
      *   Support for the FT_Outline type used to store glyph shapes of
      *   most scalable font formats (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftparams.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftparams.h
    index 43bf69c202f..94dcd6399a6 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftparams.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftparams.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for possible FT_Parameter tags (specification only).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -133,11 +133,8 @@ FT_BEGIN_HEADER
        *   FT_PARAM_TAG_LCD_FILTER_WEIGHTS
        *
        * @description:
    -   *   An @FT_Parameter tag to be used with @FT_Face_Properties.  The
    -   *   corresponding argument specifies the five LCD filter weights for a
    -   *   given face (if using @FT_LOAD_TARGET_LCD, for example), overriding the
    -   *   global default values or the values set up with
    -   *   @FT_Library_SetLcdFilterWeights.
    +   *   Overriding global LCD filter weights with custom values for a given
    +   *   face is no longer supported and ignored.
        *
        * @since:
        *   2.8
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftrender.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftrender.h
    index dc5018a1b54..cc3102073b1 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftrender.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftrender.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType renderer modules public interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftsizes.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftsizes.h
    index 4ef5c7955df..fdb89f24ccc 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftsizes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftsizes.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType size objects management (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftsnames.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftsnames.h
    index d5d5cd93103..99728574db6 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftsnames.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftsnames.h
    @@ -7,7 +7,7 @@
      *
      *   This is _not_ used to retrieve glyph names!
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftstroke.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftstroke.h
    index 41626dc9d7b..2c4761c768d 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftstroke.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftstroke.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType path stroker (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftsynth.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftsynth.h
    index 43081b6c330..93499a4b4f1 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftsynth.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftsynth.h
    @@ -5,7 +5,7 @@
      *   FreeType synthesizing code for emboldening and slanting
      *   (specification).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ftsystem.h b/src/java.desktop/share/native/libfreetype/include/freetype/ftsystem.h
    index 1eacb3af398..1de9f8e603d 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ftsystem.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ftsystem.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType low-level system interface definition (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/fttrigon.h b/src/java.desktop/share/native/libfreetype/include/freetype/fttrigon.h
    index a5299e938d4..ed7bd06a78f 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/fttrigon.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/fttrigon.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType trigonometric functions (specification).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/fttypes.h b/src/java.desktop/share/native/libfreetype/include/freetype/fttypes.h
    index 27815143a64..e207c5ebe09 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/fttypes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/fttypes.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType simple types definitions (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/autohint.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/autohint.h
    index 8865d53b389..987e704e9b0 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/autohint.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/autohint.h
    @@ -4,7 +4,7 @@
      *
      *   High-level 'autohint' module-specific interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/cffotypes.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/cffotypes.h
    index 36b0390a5a5..26ee43bb9a9 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/cffotypes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/cffotypes.h
    @@ -4,7 +4,7 @@
      *
      *   Basic OpenType/CFF object type definitions (specification).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/cfftypes.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/cfftypes.h
    index ef2e8e7569c..754122fa646 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/cfftypes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/cfftypes.h
    @@ -5,7 +5,7 @@
      *   Basic OpenType/CFF type definitions and interface (specification
      *   only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -191,8 +191,8 @@ FT_BEGIN_HEADER
         FT_UInt    weight;
         FT_Bool    is_fixed_pitch;
         FT_Fixed   italic_angle;
    -    FT_Fixed   underline_position;
    -    FT_Fixed   underline_thickness;
    +    FT_Short   underline_position;
    +    FT_UShort  underline_thickness;
         FT_Int     paint_type;
         FT_Int     charstring_type;
         FT_Matrix  font_matrix;
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/compiler-macros.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/compiler-macros.h
    index 876f66e2561..e6d0166d888 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/compiler-macros.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/compiler-macros.h
    @@ -4,7 +4,7 @@
      *
      *   Compiler-specific macro definitions used internally by FreeType.
      *
    - * Copyright (C) 2020-2024 by
    + * Copyright (C) 2020-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -128,8 +128,8 @@ FT_BEGIN_HEADER
        * before a function declaration.
        */
     
    -  /* Visual C, mingw */
    -#if defined( _WIN32 )
    +  /* Visual C, MinGW, Cygwin */
    +#if defined( _WIN32 ) || defined( __CYGWIN__ )
     #define FT_INTERNAL_FUNCTION_ATTRIBUTE  /* empty */
     
       /* gcc, clang */
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftcalc.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftcalc.h
    index 71128a2df90..16a732224ef 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftcalc.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftcalc.h
    @@ -4,7 +4,7 @@
      *
      *   Arithmetic computations (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -27,17 +27,87 @@
     FT_BEGIN_HEADER
     
     
    +  /*
    +   * The following macros have two purposes.
    +   *
    +   * - Tag places where overflow is expected and harmless.
    +   *
    +   * - Avoid run-time undefined behavior sanitizer errors.
    +   *
    +   * Use with care!
    +   */
    +#define ADD_INT( a, b )                           \
    +          (FT_Int)( (FT_UInt)(a) + (FT_UInt)(b) )
    +#define SUB_INT( a, b )                           \
    +          (FT_Int)( (FT_UInt)(a) - (FT_UInt)(b) )
    +#define MUL_INT( a, b )                           \
    +          (FT_Int)( (FT_UInt)(a) * (FT_UInt)(b) )
    +#define NEG_INT( a )                              \
    +          (FT_Int)( (FT_UInt)0 - (FT_UInt)(a) )
    +
    +#define ADD_LONG( a, b )                             \
    +          (FT_Long)( (FT_ULong)(a) + (FT_ULong)(b) )
    +#define SUB_LONG( a, b )                             \
    +          (FT_Long)( (FT_ULong)(a) - (FT_ULong)(b) )
    +#define MUL_LONG( a, b )                             \
    +          (FT_Long)( (FT_ULong)(a) * (FT_ULong)(b) )
    +#define NEG_LONG( a )                                \
    +          (FT_Long)( (FT_ULong)0 - (FT_ULong)(a) )
    +
    +#define ADD_INT32( a, b )                               \
    +          (FT_Int32)( (FT_UInt32)(a) + (FT_UInt32)(b) )
    +#define SUB_INT32( a, b )                               \
    +          (FT_Int32)( (FT_UInt32)(a) - (FT_UInt32)(b) )
    +#define MUL_INT32( a, b )                               \
    +          (FT_Int32)( (FT_UInt32)(a) * (FT_UInt32)(b) )
    +#define NEG_INT32( a )                                  \
    +          (FT_Int32)( (FT_UInt32)0 - (FT_UInt32)(a) )
    +
    +#ifdef FT_INT64
    +
    +#define ADD_INT64( a, b )                               \
    +          (FT_Int64)( (FT_UInt64)(a) + (FT_UInt64)(b) )
    +#define SUB_INT64( a, b )                               \
    +          (FT_Int64)( (FT_UInt64)(a) - (FT_UInt64)(b) )
    +#define MUL_INT64( a, b )                               \
    +          (FT_Int64)( (FT_UInt64)(a) * (FT_UInt64)(b) )
    +#define NEG_INT64( a )                                  \
    +          (FT_Int64)( (FT_UInt64)0 - (FT_UInt64)(a) )
    +
    +#endif /* FT_INT64 */
    +
    +
       /**************************************************************************
        *
        * FT_MulDiv() and FT_MulFix() are declared in freetype.h.
        *
        */
     
    -#ifndef  FT_CONFIG_OPTION_NO_ASSEMBLER
    -  /* Provide assembler fragments for performance-critical functions. */
    -  /* These must be defined `static __inline__' with GCC.             */
    +#ifdef FT_CONFIG_OPTION_INLINE_MULFIX
     
    -#if defined( __CC_ARM ) || defined( __ARMCC__ )  /* RVCT */
    +#ifdef FT_INT64
    +
    +  static inline FT_Long
    +  FT_MulFix_64( FT_Long  a,
    +                FT_Long  b )
    +  {
    +    FT_Int64  ab = MUL_INT64( a, b );
    +
    +
    +    ab = ADD_INT64( ab, 0x8000 + ( ab >> 63 ) );  /* rounding phase */
    +
    +    return (FT_Long)( ab >> 16 );
    +  }
    +
    +
    +#define FT_MulFix( a, b )  FT_MulFix_64( a, b )
    +
    +#elif !defined( FT_CONFIG_OPTION_NO_ASSEMBLER )
    +  /* Provide 32-bit assembler fragments for optimized FT_MulFix. */
    +  /* These must be defined `static __inline__' or similar.       */
    +
    +#if defined( __arm__ )                                 && \
    +    ( defined( __thumb2__ ) || !defined( __thumb__ ) )
     
     #define FT_MULFIX_ASSEMBLER  FT_MulFix_arm
     
    @@ -49,6 +119,7 @@ FT_BEGIN_HEADER
       {
         FT_Int32  t, t2;
     
    +#if defined( __CC_ARM ) || defined( __ARMCC__ )  /* RVCT */
     
         __asm
         {
    @@ -60,28 +131,8 @@ FT_BEGIN_HEADER
           mov   a,  t2, lsr #16         /* a   = t2 >> 16 */
           orr   a,  a,  t,  lsl #16     /* a  |= t << 16 */
         }
    -    return a;
    -  }
    -
    -#endif /* __CC_ARM || __ARMCC__ */
    -
    -
    -#ifdef __GNUC__
    -
    -#if defined( __arm__ )                                 && \
    -    ( !defined( __thumb__ ) || defined( __thumb2__ ) ) && \
    -    !( defined( __CC_ARM ) || defined( __ARMCC__ ) )
    -
    -#define FT_MULFIX_ASSEMBLER  FT_MulFix_arm
    -
    -  /* documentation is in freetype.h */
    -
    -  static __inline__ FT_Int32
    -  FT_MulFix_arm( FT_Int32  a,
    -                 FT_Int32  b )
    -  {
    -    FT_Int32  t, t2;
     
    +#elif defined( __GNUC__ )
     
         __asm__ __volatile__ (
           "smull  %1, %2, %4, %3\n\t"       /* (lo=%1,hi=%2) = a*b */
    @@ -98,26 +149,25 @@ FT_BEGIN_HEADER
           : "=r"(a), "=&r"(t2), "=&r"(t)
           : "r"(a), "r"(b)
           : "cc" );
    +
    +#endif
    +
         return a;
       }
     
    -#endif /* __arm__                      && */
    -       /* ( __thumb2__ || !__thumb__ ) && */
    -       /* !( __CC_ARM || __ARMCC__ )      */
    -
    -
    -#if defined( __i386__ )
    +#elif defined( __i386__ ) || defined( _M_IX86 )
     
     #define FT_MULFIX_ASSEMBLER  FT_MulFix_i386
     
       /* documentation is in freetype.h */
     
    -  static __inline__ FT_Int32
    +  static __inline FT_Int32
       FT_MulFix_i386( FT_Int32  a,
                       FT_Int32  b )
       {
         FT_Int32  result;
     
    +#if defined( __GNUC__ )
     
         __asm__ __volatile__ (
           "imul  %%edx\n"
    @@ -132,27 +182,8 @@ FT_BEGIN_HEADER
           : "=a"(result), "=d"(b)
           : "a"(a), "d"(b)
           : "%ecx", "cc" );
    -    return result;
    -  }
     
    -#endif /* i386 */
    -
    -#endif /* __GNUC__ */
    -
    -
    -#ifdef _MSC_VER /* Visual C++ */
    -
    -#ifdef _M_IX86
    -
    -#define FT_MULFIX_ASSEMBLER  FT_MulFix_i386
    -
    -  /* documentation is in freetype.h */
    -
    -  static __inline FT_Int32
    -  FT_MulFix_i386( FT_Int32  a,
    -                  FT_Int32  b )
    -  {
    -    FT_Int32  result;
    +#elif defined( _MSC_VER )
     
         __asm
         {
    @@ -169,81 +200,21 @@ FT_BEGIN_HEADER
           add eax, edx
           mov result, eax
         }
    +
    +#endif
    +
         return result;
       }
     
    -#endif /* _M_IX86 */
    +#endif /* __i386__ || _M_IX86 */
     
    -#endif /* _MSC_VER */
    -
    -
    -#if defined( __GNUC__ ) && defined( __x86_64__ )
    -
    -#define FT_MULFIX_ASSEMBLER  FT_MulFix_x86_64
    -
    -  static __inline__ FT_Int32
    -  FT_MulFix_x86_64( FT_Int32  a,
    -                    FT_Int32  b )
    -  {
    -    /* Temporarily disable the warning that C90 doesn't support */
    -    /* `long long'.                                             */
    -#if __GNUC__ > 4 || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 )
    -#pragma GCC diagnostic push
    -#pragma GCC diagnostic ignored "-Wlong-long"
    -#endif
    -
    -#if 1
    -    /* Technically not an assembly fragment, but GCC does a really good */
    -    /* job at inlining it and generating good machine code for it.      */
    -    long long  ret, tmp;
    -
    -
    -    ret  = (long long)a * b;
    -    tmp  = ret >> 63;
    -    ret += 0x8000 + tmp;
    -
    -    return (FT_Int32)( ret >> 16 );
    -#else
    -
    -    /* For some reason, GCC 4.6 on Ubuntu 12.04 generates invalid machine  */
    -    /* code from the lines below.  The main issue is that `wide_a' is not  */
    -    /* properly initialized by sign-extending `a'.  Instead, the generated */
    -    /* machine code assumes that the register that contains `a' on input   */
    -    /* can be used directly as a 64-bit value, which is wrong most of the  */
    -    /* time.                                                               */
    -    long long  wide_a = (long long)a;
    -    long long  wide_b = (long long)b;
    -    long long  result;
    -
    -
    -    __asm__ __volatile__ (
    -      "imul %2, %1\n"
    -      "mov %1, %0\n"
    -      "sar $63, %0\n"
    -      "lea 0x8000(%1, %0), %0\n"
    -      "sar $16, %0\n"
    -      : "=&r"(result), "=&r"(wide_a)
    -      : "r"(wide_b)
    -      : "cc" );
    -
    -    return (FT_Int32)result;
    -#endif
    -
    -#if __GNUC__ > 4 || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 )
    -#pragma GCC diagnostic pop
    -#endif
    -  }
    -
    -#endif /* __GNUC__ && __x86_64__ */
    -
    -#endif /* !FT_CONFIG_OPTION_NO_ASSEMBLER */
    -
    -
    -#ifdef FT_CONFIG_OPTION_INLINE_MULFIX
     #ifdef FT_MULFIX_ASSEMBLER
     #define FT_MulFix( a, b )  FT_MULFIX_ASSEMBLER( (FT_Int32)(a), (FT_Int32)(b) )
     #endif
    -#endif
    +
    +#endif /* !FT_CONFIG_OPTION_NO_ASSEMBLER */
    +
    +#endif /* FT_CONFIG_OPTION_INLINE_MULFIX */
     
     
       /**************************************************************************
    @@ -278,40 +249,6 @@ FT_BEGIN_HEADER
                           FT_Long  c );
     
     
    -  /**************************************************************************
    -   *
    -   * @function:
    -   *   FT_MulAddFix
    -   *
    -   * @description:
    -   *   Compute `(s[0] * f[0] + s[1] * f[1] + ...) / 0x10000`, where `s[n]` is
    -   *   usually a 16.16 scalar.
    -   *
    -   * @input:
    -   *   s ::
    -   *     The array of scalars.
    -   *   f ::
    -   *     The array of factors.
    -   *   count ::
    -   *     The number of entries in the array.
    -   *
    -   * @return:
    -   *   The result of `(s[0] * f[0] + s[1] * f[1] + ...) / 0x10000`.
    -   *
    -   * @note:
    -   *   This function is currently used for the scaled delta computation of
    -   *   variation stores.  It internally uses 64-bit data types when
    -   *   available, otherwise it emulates 64-bit math by using 32-bit
    -   *   operations, which produce a correct result but most likely at a slower
    -   *   performance in comparison to the implementation base on `int64_t`.
    -   *
    -   */
    -  FT_BASE( FT_Int32 )
    -  FT_MulAddFix( FT_Fixed*  s,
    -                FT_Int32*  f,
    -                FT_UInt    count );
    -
    -
       /*
        * A variant of FT_Matrix_Multiply which scales its result afterwards.  The
        * idea is that both `a' and `b' are scaled by factors of 10 so that the
    @@ -455,6 +392,10 @@ FT_BEGIN_HEADER
     
     #define FT_MSB( x )  FT_MSB_i386( x )
     
    +#elif defined( __CC_ARM )
    +
    +#define FT_MSB( x )  ( 31 - __clz( x ) )
    +
     #elif defined( __SunOS_5_11 )
     
     #include 
    @@ -526,55 +467,6 @@ FT_BEGIN_HEADER
     
     #define ROUND_F26DOT6( x )     ( ( (x) + 32 - ( x < 0 ) ) & -64 )
     
    -  /*
    -   * The following macros have two purposes.
    -   *
    -   * - Tag places where overflow is expected and harmless.
    -   *
    -   * - Avoid run-time sanitizer errors.
    -   *
    -   * Use with care!
    -   */
    -#define ADD_INT( a, b )                           \
    -          (FT_Int)( (FT_UInt)(a) + (FT_UInt)(b) )
    -#define SUB_INT( a, b )                           \
    -          (FT_Int)( (FT_UInt)(a) - (FT_UInt)(b) )
    -#define MUL_INT( a, b )                           \
    -          (FT_Int)( (FT_UInt)(a) * (FT_UInt)(b) )
    -#define NEG_INT( a )                              \
    -          (FT_Int)( (FT_UInt)0 - (FT_UInt)(a) )
    -
    -#define ADD_LONG( a, b )                             \
    -          (FT_Long)( (FT_ULong)(a) + (FT_ULong)(b) )
    -#define SUB_LONG( a, b )                             \
    -          (FT_Long)( (FT_ULong)(a) - (FT_ULong)(b) )
    -#define MUL_LONG( a, b )                             \
    -          (FT_Long)( (FT_ULong)(a) * (FT_ULong)(b) )
    -#define NEG_LONG( a )                                \
    -          (FT_Long)( (FT_ULong)0 - (FT_ULong)(a) )
    -
    -#define ADD_INT32( a, b )                               \
    -          (FT_Int32)( (FT_UInt32)(a) + (FT_UInt32)(b) )
    -#define SUB_INT32( a, b )                               \
    -          (FT_Int32)( (FT_UInt32)(a) - (FT_UInt32)(b) )
    -#define MUL_INT32( a, b )                               \
    -          (FT_Int32)( (FT_UInt32)(a) * (FT_UInt32)(b) )
    -#define NEG_INT32( a )                                  \
    -          (FT_Int32)( (FT_UInt32)0 - (FT_UInt32)(a) )
    -
    -#ifdef FT_INT64
    -
    -#define ADD_INT64( a, b )                               \
    -          (FT_Int64)( (FT_UInt64)(a) + (FT_UInt64)(b) )
    -#define SUB_INT64( a, b )                               \
    -          (FT_Int64)( (FT_UInt64)(a) - (FT_UInt64)(b) )
    -#define MUL_INT64( a, b )                               \
    -          (FT_Int64)( (FT_UInt64)(a) * (FT_UInt64)(b) )
    -#define NEG_INT64( a )                                  \
    -          (FT_Int64)( (FT_UInt64)0 - (FT_UInt64)(a) )
    -
    -#endif /* FT_INT64 */
    -
     
     FT_END_HEADER
     
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdebug.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdebug.h
    index d7fa8dc93cf..d7facf40d12 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdebug.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdebug.h
    @@ -4,7 +4,7 @@
      *
      *   Debugging and logging component (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdrv.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdrv.h
    index 5609b3ef12b..24be4dad36b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdrv.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftdrv.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType internal font driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftgloadr.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftgloadr.h
    index f1c155b162c..8f2a54c015b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftgloadr.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftgloadr.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph loader (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/fthash.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/fthash.h
    index 622ec76bb9a..642d21e21c6 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/fthash.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/fthash.h
    @@ -117,6 +117,18 @@ FT_BEGIN_HEADER
                           FT_Hash    hash,
                           FT_Memory  memory );
     
    +  FT_Error
    +  ft_hash_str_insert_no_overwrite( const char*  key,
    +                                   size_t       data,
    +                                   FT_Hash      hash,
    +                                   FT_Memory    memory );
    +
    +  FT_Error
    +  ft_hash_num_insert_no_overwrite( FT_Int     num,
    +                                   size_t     data,
    +                                   FT_Hash    hash,
    +                                   FT_Memory  memory );
    +
       size_t*
       ft_hash_str_lookup( const char*  key,
                           FT_Hash      hash );
    @@ -125,6 +137,17 @@ FT_BEGIN_HEADER
       ft_hash_num_lookup( FT_Int   num,
                           FT_Hash  hash );
     
    +  FT_Bool
    +  ft_hash_num_iterator( FT_UInt  *idx,
    +                        FT_Int   *key,
    +                        size_t   *value,
    +                        FT_Hash   hash );
    +
    +  FT_Bool
    +  ft_hash_str_iterator( FT_UInt      *idx,
    +                        const char*  *key,
    +                        size_t       *value,
    +                        FT_Hash       hash );
     
     FT_END_HEADER
     
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmemory.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmemory.h
    index 4e05a29f13a..c75c33f2895 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmemory.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmemory.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType memory management macros (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmmtypes.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmmtypes.h
    index 8449e7a010d..be3747bbf94 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmmtypes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftmmtypes.h
    @@ -5,7 +5,7 @@
      *   OpenType Variations type definitions for internal use
      *   with the multi-masters service (specification).
      *
    - * Copyright (C) 2022-2024 by
    + * Copyright (C) 2022-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, George Williams, and
      * Dominik Röttsches.
      *
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftobjs.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftobjs.h
    index a1e93298fdb..d4d7bc00fe9 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftobjs.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftobjs.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType private base classes (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -275,6 +275,28 @@ FT_BEGIN_HEADER
                       FT_GlyphSlot    slot,
                       FT_Render_Mode  mode );
     
    +
    +  /**************************************************************************
    +   *
    +   * @Function:
    +   *   find_unicode_charmap
    +   *
    +   * @Description:
    +   *   This function finds a Unicode charmap, if there is one.  And if there
    +   *   is more than one, it tries to favour the more extensive one, i.e., one
    +   *   that supports UCS-4 against those which are limited to the BMP (UCS-2
    +   *   encoding.)
    +   *
    +   *   If a unicode charmap is found, `face->charmap` is set to it.
    +   *
    +   *   This function is called from `open_face`, from `FT_Select_Charmap(...,
    +   *   FT_ENCODING_UNICODE)`, and also from `afadjust.c` in the 'autofit'
    +   *   module.
    +   */
    +  FT_BASE( FT_Error )
    +  find_unicode_charmap( FT_Face  face );
    +
    +
     #ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
     
       typedef void  (*FT_Bitmap_LcdFilterFunc)( FT_Bitmap*      bitmap,
    @@ -343,11 +365,6 @@ FT_BEGIN_HEADER
        *     Value~0 means to use the font's value.  Value~-1 means to use the
        *     CFF driver's default.
        *
    -   *   lcd_weights ::
    -   *   lcd_filter_func ::
    -   *     These fields specify the LCD filtering weights and callback function
    -   *     for ClearType-style subpixel rendering.
    -   *
        *   refcount ::
        *     A counter initialized to~1 at the time an @FT_Face structure is
        *     created.  @FT_Reference_Face increments this counter, and
    @@ -369,11 +386,6 @@ FT_BEGIN_HEADER
         FT_Char              no_stem_darkening;
         FT_Int32             random_seed;
     
    -#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
    -    FT_LcdFiveTapFilter      lcd_weights;      /* filter weights, if any */
    -    FT_Bitmap_LcdFilterFunc  lcd_filter_func;  /* filtering callback     */
    -#endif
    -
         FT_Int  refcount;
     
       } FT_Face_InternalRec;
    @@ -498,9 +510,9 @@ FT_BEGIN_HEADER
        */
       typedef struct  FT_ModuleRec_
       {
    -    FT_Module_Class*  clazz;
    -    FT_Library        library;
    -    FT_Memory         memory;
    +    const FT_Module_Class*  clazz;
    +    FT_Library              library;
    +    FT_Memory               memory;
     
       } FT_ModuleRec;
     
    @@ -702,9 +714,9 @@ FT_BEGIN_HEADER
                                   const FT_Vector*  origin );
     
       /* Allocate a new bitmap buffer in a glyph slot. */
    +  /* Dimensions must be preset in advance.         */
       FT_BASE( FT_Error )
    -  ft_glyphslot_alloc_bitmap( FT_GlyphSlot  slot,
    -                             FT_ULong      size );
    +  ft_glyphslot_alloc_bitmap( FT_GlyphSlot  slot );
     
     
       /* Set the bitmap buffer in a glyph slot to a given pointer.  The buffer */
    @@ -867,10 +879,6 @@ FT_BEGIN_HEADER
        *   lcd_weights ::
        *     The LCD filter weights for ClearType-style subpixel rendering.
        *
    -   *   lcd_filter_func ::
    -   *     The LCD filtering callback function for for ClearType-style subpixel
    -   *     rendering.
    -   *
        *   lcd_geometry ::
        *     This array specifies LCD subpixel geometry and controls Harmony LCD
        *     rendering technique, alternative to ClearType.
    @@ -904,7 +912,6 @@ FT_BEGIN_HEADER
     
     #ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
         FT_LcdFiveTapFilter      lcd_weights;      /* filter weights, if any */
    -    FT_Bitmap_LcdFilterFunc  lcd_filter_func;  /* filtering callback     */
     #else
         FT_Vector                lcd_geometry[3];  /* RGB subpixel positions */
     #endif
    @@ -973,17 +980,6 @@ FT_BEGIN_HEADER
     #endif /* !FT_CONFIG_OPTION_NO_DEFAULT_SYSTEM */
     
     
    -  /* Define default raster's interface.  The default raster is located in  */
    -  /* `src/base/ftraster.c'.                                                */
    -  /*                                                                       */
    -  /* Client applications can register new rasters through the              */
    -  /* FT_Set_Raster() API.                                                  */
    -
    -#ifndef FT_NO_DEFAULT_RASTER
    -  FT_EXPORT_VAR( FT_Raster_Funcs )  ft_default_raster;
    -#endif
    -
    -
       /**************************************************************************
        *
        * @macro:
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftpsprop.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftpsprop.h
    index 4f11aa16ba1..18a954d22f5 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftpsprop.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftpsprop.h
    @@ -4,7 +4,7 @@
      *
      *   Get and set properties of PostScript drivers (specification).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftrfork.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftrfork.h
    index 05c1d6c48b5..e077f98bfb9 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftrfork.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftrfork.h
    @@ -4,7 +4,7 @@
      *
      *   Embedded resource forks accessor (specification).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * Masatake YAMATO and Redhat K.K.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftserv.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftserv.h
    index 8c35dbd7139..ce11bba19b2 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftserv.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftserv.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType services (specification only).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftstream.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftstream.h
    index fd52f767ef7..20c1dd7c4b0 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftstream.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftstream.h
    @@ -4,7 +4,7 @@
      *
      *   Stream handling (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/fttrace.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/fttrace.h
    index 42595a29ff3..3fd592800e2 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/fttrace.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/fttrace.h
    @@ -4,7 +4,7 @@
      *
      *   Tracing handling (specification only).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -19,7 +19,7 @@
       /* definitions of trace levels for FreeType 2 */
     
       /* the maximum string length (if the argument to `FT_TRACE_DEF` */
    -  /* gets used as a string) plus one charachter for ':' plus      */
    +  /* gets used as a string) plus one character for ':' plus       */
       /* another one for the trace level                              */
     #define FT_MAX_TRACE_LEVEL_LENGTH  (9 + 1 + 1)
     
    @@ -159,6 +159,7 @@ FT_TRACE_DEF( gxvprop )
     FT_TRACE_DEF( gxvtrak )
     
       /* autofit components */
    +FT_TRACE_DEF( afadjust )
     FT_TRACE_DEF( afcjk )
     FT_TRACE_DEF( afglobal )
     FT_TRACE_DEF( afhints )
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftvalid.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftvalid.h
    index a1312f2aba6..03a726c82cb 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftvalid.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/ftvalid.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType validation support (specification).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/psaux.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/psaux.h
    index 745d2cb56b7..344be0f19a7 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/psaux.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/psaux.h
    @@ -5,7 +5,7 @@
      *   Auxiliary functions and data structures related to PostScript fonts
      *   (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/pshints.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/pshints.h
    index dba6c7303fd..96c5d84f058 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/pshints.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/pshints.h
    @@ -6,7 +6,7 @@
      *   recorders (specification only).  These are used to support native
      *   T1/T2 hints in the 'type1', 'cid', and 'cff' font drivers.
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svbdf.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svbdf.h
    index 89e9c2e5de8..5bd51da23f4 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svbdf.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svbdf.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType BDF services (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcfftl.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcfftl.h
    index 3cb483c344f..c97bf84fb2e 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcfftl.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcfftl.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType CFF tables loader service (specification).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcid.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcid.h
    index 8362cb8724d..748a8caf887 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcid.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svcid.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType CID font services (specification).
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * Derek Clegg and Michael Toftdal.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svfntfmt.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svfntfmt.h
    index 6b837e79fcd..690fdc2a24f 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svfntfmt.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svfntfmt.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType font format service (specification only).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgldict.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgldict.h
    index 6126ec9ada4..7128d6f3d7a 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgldict.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgldict.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph dictionary services (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgxval.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgxval.h
    index 29cf5528189..1ca3e0a031b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgxval.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svgxval.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for validating TrueTypeGX/AAT tables (specification).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * Masatake YAMATO, Red Hat K.K.,
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svkern.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svkern.h
    index ac1bc30c412..8a3d59bec6d 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svkern.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svkern.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType Kerning service (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmetric.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmetric.h
    index 8b3563b25ca..4dde3a8151a 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmetric.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmetric.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType services for metrics variations (specification).
      *
    - * Copyright (C) 2016-2024 by
    + * Copyright (C) 2016-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -77,7 +77,7 @@ FT_BEGIN_HEADER
       typedef void
       (*FT_Metrics_Adjust_Func)( FT_Face  face );
     
    -  typedef FT_Error
    +  typedef void
       (*FT_Size_Reset_Func)( FT_Size  size );
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmm.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmm.h
    index 5288fadf375..9be133e2db0 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmm.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svmm.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType Multiple Masters and GX var services (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, and Dominik Röttsches.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svotval.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svotval.h
    index 7aea7ec11f0..933e5de98da 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svotval.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svotval.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType OpenType validation service (specification).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpfr.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpfr.h
    index b2fac6d086b..c81b6a68a8b 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpfr.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpfr.h
    @@ -4,7 +4,7 @@
      *
      *   Internal PFR service functions (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpostnm.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpostnm.h
    index d19f3adc6d5..33864ebc344 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpostnm.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpostnm.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType PostScript name services (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svprop.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svprop.h
    index ba39c0dd4da..0eb79c885d8 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svprop.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svprop.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType property service (specification).
      *
    - * Copyright (C) 2012-2024 by
    + * Copyright (C) 2012-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpscmap.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpscmap.h
    index d4908ee41aa..8f85d12157c 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpscmap.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpscmap.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType PostScript charmap service (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpsinfo.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpsinfo.h
    index 2aadcdd02a1..83de04478df 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpsinfo.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svpsinfo.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType PostScript info service (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svsfnt.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svsfnt.h
    index 9e0f4ff202e..9bf5e3473c4 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svsfnt.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svsfnt.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType SFNT table loading service (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttcmap.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttcmap.h
    index 250886bcc5d..fc9b0aeb8e3 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttcmap.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttcmap.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType TrueType/sfnt cmap extra information service.
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * Masatake YAMATO, Redhat K.K.,
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svtteng.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svtteng.h
    index 14967529a9a..979e9ea102e 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svtteng.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svtteng.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType TrueType engine query service (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttglyf.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttglyf.h
    index f190b3985d0..e4f54c10037 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttglyf.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svttglyf.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType TrueType glyph service.
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * David Turner.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svwinfnt.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svwinfnt.h
    index 49f3fb7f775..ff887ffdc03 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svwinfnt.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/services/svwinfnt.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType Windows FNT/FONT service (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/sfnt.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/sfnt.h
    index 35e4e73af02..adba2178877 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/sfnt.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/sfnt.h
    @@ -4,7 +4,7 @@
      *
      *   High-level 'sfnt' driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -612,7 +612,7 @@ FT_BEGIN_HEADER
        *
        * @return:
        *   Value~1 if a ClipBox is found.  If no clip box is found or an
    -   *   error occured, value~0 is returned.
    +   *   error occurred, value~0 is returned.
        */
       typedef FT_Bool
       ( *TT_Get_Color_Glyph_ClipBox_Func )( TT_Face      face,
    @@ -707,7 +707,7 @@ FT_BEGIN_HEADER
        *
        * @return:
        *   Value~1 if everything is OK.  Value~0 if no details can be found for
    -   *   this paint or any other error occured.
    +   *   this paint or any other error occurred.
        */
       typedef FT_Bool
       ( *TT_Get_Paint_Func )( TT_Face         face,
    @@ -808,7 +808,7 @@ FT_BEGIN_HEADER
        *     corresponding (1,0) Apple entry.
        *
        * @return:
    -   *   1 if there is either a win or apple entry (or both), 0 otheriwse.
    +   *   1 if there is either a win or apple entry (or both), 0 otherwise.
        */
       typedef FT_Bool
       (*TT_Get_Name_ID_Func)( TT_Face    face,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/svginterface.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/svginterface.h
    index 68c99efb10a..20c73b2fbd2 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/svginterface.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/svginterface.h
    @@ -4,7 +4,7 @@
      *
      *   Interface of ot-svg module (specification only).
      *
    - * Copyright (C) 2022-2024 by
    + * Copyright (C) 2022-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, and Moazin Khatti.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/t1types.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/t1types.h
    index 1821ae5cc83..5b26e4620d0 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/t1types.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/t1types.h
    @@ -5,7 +5,7 @@
      *   Basic Type1/Type2 type definitions and interface (specification
      *   only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/tttypes.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/tttypes.h
    index 7053e656a7e..d0e5eee89bc 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/tttypes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/tttypes.h
    @@ -5,7 +5,7 @@
      *   Basic SFNT/TrueType type definitions and interface (specification
      *   only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -930,8 +930,8 @@ FT_BEGIN_HEADER
        *   resolution and scaling independent parts of a TrueType font resource.
        *
        * @note:
    -   *   The TT_Face structure is also used as a 'parent class' for the
    -   *   OpenType-CFF class (T2_Face).
    +   *   The TT_Face structure is also used for CFF support; see file
    +   *   `cffotypes.h`.
        */
       typedef struct TT_FaceRec_*  TT_Face;
     
    @@ -1276,10 +1276,6 @@ FT_BEGIN_HEADER
        *
        *     If varied by the `CVAR' table, non-integer values are possible.
        *
    -   *   interpreter ::
    -   *     A pointer to the TrueType bytecode interpreters field is also used
    -   *     to hook the debugger in 'ttdebug'.
    -   *
        *   extra ::
        *     Reserved for third-party font drivers.
        *
    @@ -1521,10 +1517,6 @@ FT_BEGIN_HEADER
         FT_ULong              cvt_size;
         FT_Int32*             cvt;
     
    -    /* A pointer to the bytecode interpreter to use.  This is also */
    -    /* used to hook the debugger for the `ttdebug' utility.        */
    -    TT_Interpreter        interpreter;
    -
     
         /************************************************************************
          *
    @@ -1582,11 +1574,6 @@ FT_BEGIN_HEADER
         FT_UInt32             kern_avail_bits;
         FT_UInt32             kern_order_bits;
     
    -#ifdef TT_CONFIG_OPTION_GPOS_KERNING
    -    FT_Byte*              gpos_table;
    -    FT_Bool               gpos_kerning_available;
    -#endif
    -
     #ifdef TT_CONFIG_OPTION_BDF
         TT_BDFRec             bdf;
     #endif /* TT_CONFIG_OPTION_BDF */
    @@ -1608,6 +1595,15 @@ FT_BEGIN_HEADER
         /* since 2.12 */
         void*                 svg;
     
    +#ifdef TT_CONFIG_OPTION_GPOS_KERNING
    +    /* since 2.13.3 */
    +    FT_Byte*              gpos_table;
    +    /* since 2.14 */
    +    /* This is actually an array of GPOS lookup subtables. */
    +    FT_UInt32*            gpos_lookups_kerning;
    +    FT_UInt               num_gpos_lookups_kerning;
    +#endif
    +
       } TT_FaceRec;
     
     
    @@ -1621,15 +1617,6 @@ FT_BEGIN_HEADER
        *   coordinates.
        *
        * @fields:
    -   *   memory ::
    -   *     A handle to the memory manager.
    -   *
    -   *   max_points ::
    -   *     The maximum size in points of the zone.
    -   *
    -   *   max_contours ::
    -   *     Max size in links contours of the zone.
    -   *
        *   n_points ::
        *     The current number of points in the zone.
        *
    @@ -1653,9 +1640,6 @@ FT_BEGIN_HEADER
        */
       typedef struct  TT_GlyphZoneRec_
       {
    -    FT_Memory   memory;
    -    FT_UShort   max_points;
    -    FT_UShort   max_contours;
         FT_UShort   n_points;    /* number of points in zone    */
         FT_UShort   n_contours;  /* number of contours          */
     
    @@ -1714,7 +1698,6 @@ FT_BEGIN_HEADER
         TT_GlyphZoneRec  zone;
     
         TT_ExecContext   exec;
    -    FT_Byte*         instructions;
         FT_ULong         ins_pos;
     
         /* for possible extensibility in other formats */
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/internal/wofftypes.h b/src/java.desktop/share/native/libfreetype/include/freetype/internal/wofftypes.h
    index 4a169d12f57..7d5b7df0fa1 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/internal/wofftypes.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/internal/wofftypes.h
    @@ -5,7 +5,7 @@
      *   Basic WOFF/WOFF2 type definitions and interface (specification
      *   only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/otsvg.h b/src/java.desktop/share/native/libfreetype/include/freetype/otsvg.h
    index 9d356938cc7..326bbcd0153 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/otsvg.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/otsvg.h
    @@ -4,7 +4,7 @@
      *
      *   Interface for OT-SVG support related things (specification).
      *
    - * Copyright (C) 2022-2024 by
    + * Copyright (C) 2022-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, and Moazin Khatti.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/t1tables.h b/src/java.desktop/share/native/libfreetype/include/freetype/t1tables.h
    index fbd558aa34d..fe769f607fa 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/t1tables.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/t1tables.h
    @@ -5,7 +5,7 @@
      *   Basic Type 1/Type 2 tables definitions and interface (specification
      *   only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -92,7 +92,7 @@ FT_BEGIN_HEADER
         FT_String*  full_name;
         FT_String*  family_name;
         FT_String*  weight;
    -    FT_Long     italic_angle;
    +    FT_Fixed    italic_angle;
         FT_Bool     is_fixed_pitch;
         FT_Short    underline_position;
         FT_UShort   underline_thickness;
    @@ -645,7 +645,7 @@ FT_BEGIN_HEADER
         PS_DICT_UNDERLINE_POSITION,     /* FT_Short   */
         PS_DICT_UNDERLINE_THICKNESS,    /* FT_UShort  */
         PS_DICT_FS_TYPE,                /* FT_UShort  */
    -    PS_DICT_ITALIC_ANGLE,           /* FT_Long    */
    +    PS_DICT_ITALIC_ANGLE,           /* FT_Fixed   */
     
         PS_DICT_MAX = PS_DICT_ITALIC_ANGLE
     
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/ttnameid.h b/src/java.desktop/share/native/libfreetype/include/freetype/ttnameid.h
    index d5d470e380f..3ef61091cc9 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/ttnameid.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/ttnameid.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType name ID definitions (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -436,7 +436,7 @@ FT_BEGIN_HEADER
        *
        *   The canonical source for Microsoft's IDs is
        *
    -   *     https://docs.microsoft.com/en-us/windows/desktop/Intl/language-identifier-constants-and-strings ,
    +   *     https://learn.microsoft.com/windows/win32/intl/language-identifier-constants-and-strings ,
        *
        *   however, we only provide macros for language identifiers present in
        *   the OpenType specification: Microsoft has abandoned the concept of
    @@ -847,113 +847,113 @@ FT_BEGIN_HEADER
       /* --------------- */
     
       /* Bit  0   Basic Latin */
    -#define TT_UCR_BASIC_LATIN                     (1L <<  0) /* U+0020-U+007E */
    +#define TT_UCR_BASIC_LATIN                    (1UL <<  0) /* U+0020-U+007E */
       /* Bit  1   C1 Controls and Latin-1 Supplement */
    -#define TT_UCR_LATIN1_SUPPLEMENT               (1L <<  1) /* U+0080-U+00FF */
    +#define TT_UCR_LATIN1_SUPPLEMENT              (1UL <<  1) /* U+0080-U+00FF */
       /* Bit  2   Latin Extended-A */
    -#define TT_UCR_LATIN_EXTENDED_A                (1L <<  2) /* U+0100-U+017F */
    +#define TT_UCR_LATIN_EXTENDED_A               (1UL <<  2) /* U+0100-U+017F */
       /* Bit  3   Latin Extended-B */
    -#define TT_UCR_LATIN_EXTENDED_B                (1L <<  3) /* U+0180-U+024F */
    +#define TT_UCR_LATIN_EXTENDED_B               (1UL <<  3) /* U+0180-U+024F */
       /* Bit  4   IPA Extensions                 */
       /*          Phonetic Extensions            */
       /*          Phonetic Extensions Supplement */
    -#define TT_UCR_IPA_EXTENSIONS                  (1L <<  4) /* U+0250-U+02AF */
    +#define TT_UCR_IPA_EXTENSIONS                 (1UL <<  4) /* U+0250-U+02AF */
                                                               /* U+1D00-U+1D7F */
                                                               /* U+1D80-U+1DBF */
       /* Bit  5   Spacing Modifier Letters */
       /*          Modifier Tone Letters    */
    -#define TT_UCR_SPACING_MODIFIER                (1L <<  5) /* U+02B0-U+02FF */
    +#define TT_UCR_SPACING_MODIFIER               (1UL <<  5) /* U+02B0-U+02FF */
                                                               /* U+A700-U+A71F */
       /* Bit  6   Combining Diacritical Marks            */
       /*          Combining Diacritical Marks Supplement */
    -#define TT_UCR_COMBINING_DIACRITICAL_MARKS     (1L <<  6) /* U+0300-U+036F */
    +#define TT_UCR_COMBINING_DIACRITICAL_MARKS    (1UL <<  6) /* U+0300-U+036F */
                                                               /* U+1DC0-U+1DFF */
       /* Bit  7   Greek and Coptic */
    -#define TT_UCR_GREEK                           (1L <<  7) /* U+0370-U+03FF */
    +#define TT_UCR_GREEK                          (1UL <<  7) /* U+0370-U+03FF */
       /* Bit  8   Coptic */
    -#define TT_UCR_COPTIC                          (1L <<  8) /* U+2C80-U+2CFF */
    +#define TT_UCR_COPTIC                         (1UL <<  8) /* U+2C80-U+2CFF */
       /* Bit  9   Cyrillic            */
       /*          Cyrillic Supplement */
       /*          Cyrillic Extended-A */
       /*          Cyrillic Extended-B */
    -#define TT_UCR_CYRILLIC                        (1L <<  9) /* U+0400-U+04FF */
    +#define TT_UCR_CYRILLIC                       (1UL <<  9) /* U+0400-U+04FF */
                                                               /* U+0500-U+052F */
                                                               /* U+2DE0-U+2DFF */
                                                               /* U+A640-U+A69F */
       /* Bit 10   Armenian */
    -#define TT_UCR_ARMENIAN                        (1L << 10) /* U+0530-U+058F */
    +#define TT_UCR_ARMENIAN                       (1UL << 10) /* U+0530-U+058F */
       /* Bit 11   Hebrew */
    -#define TT_UCR_HEBREW                          (1L << 11) /* U+0590-U+05FF */
    +#define TT_UCR_HEBREW                         (1UL << 11) /* U+0590-U+05FF */
       /* Bit 12   Vai */
    -#define TT_UCR_VAI                             (1L << 12) /* U+A500-U+A63F */
    +#define TT_UCR_VAI                            (1UL << 12) /* U+A500-U+A63F */
       /* Bit 13   Arabic            */
       /*          Arabic Supplement */
    -#define TT_UCR_ARABIC                          (1L << 13) /* U+0600-U+06FF */
    +#define TT_UCR_ARABIC                         (1UL << 13) /* U+0600-U+06FF */
                                                               /* U+0750-U+077F */
       /* Bit 14   NKo */
    -#define TT_UCR_NKO                             (1L << 14) /* U+07C0-U+07FF */
    +#define TT_UCR_NKO                            (1UL << 14) /* U+07C0-U+07FF */
       /* Bit 15   Devanagari */
    -#define TT_UCR_DEVANAGARI                      (1L << 15) /* U+0900-U+097F */
    -  /* Bit 16   Bengali */
    -#define TT_UCR_BENGALI                         (1L << 16) /* U+0980-U+09FF */
    +#define TT_UCR_DEVANAGARI                     (1UL << 15) /* U+0900-U+097F */
    +  /* Bit 16   Bangla (Bengali) */
    +#define TT_UCR_BENGALI                        (1UL << 16) /* U+0980-U+09FF */
       /* Bit 17   Gurmukhi */
    -#define TT_UCR_GURMUKHI                        (1L << 17) /* U+0A00-U+0A7F */
    +#define TT_UCR_GURMUKHI                       (1UL << 17) /* U+0A00-U+0A7F */
       /* Bit 18   Gujarati */
    -#define TT_UCR_GUJARATI                        (1L << 18) /* U+0A80-U+0AFF */
    -  /* Bit 19   Oriya */
    -#define TT_UCR_ORIYA                           (1L << 19) /* U+0B00-U+0B7F */
    +#define TT_UCR_GUJARATI                       (1UL << 18) /* U+0A80-U+0AFF */
    +  /* Bit 19   Oriya (Odia) */
    +#define TT_UCR_ORIYA                          (1UL << 19) /* U+0B00-U+0B7F */
       /* Bit 20   Tamil */
    -#define TT_UCR_TAMIL                           (1L << 20) /* U+0B80-U+0BFF */
    +#define TT_UCR_TAMIL                          (1UL << 20) /* U+0B80-U+0BFF */
       /* Bit 21   Telugu */
    -#define TT_UCR_TELUGU                          (1L << 21) /* U+0C00-U+0C7F */
    +#define TT_UCR_TELUGU                         (1UL << 21) /* U+0C00-U+0C7F */
       /* Bit 22   Kannada */
    -#define TT_UCR_KANNADA                         (1L << 22) /* U+0C80-U+0CFF */
    +#define TT_UCR_KANNADA                        (1UL << 22) /* U+0C80-U+0CFF */
       /* Bit 23   Malayalam */
    -#define TT_UCR_MALAYALAM                       (1L << 23) /* U+0D00-U+0D7F */
    +#define TT_UCR_MALAYALAM                      (1UL << 23) /* U+0D00-U+0D7F */
       /* Bit 24   Thai */
    -#define TT_UCR_THAI                            (1L << 24) /* U+0E00-U+0E7F */
    +#define TT_UCR_THAI                           (1UL << 24) /* U+0E00-U+0E7F */
       /* Bit 25   Lao */
    -#define TT_UCR_LAO                             (1L << 25) /* U+0E80-U+0EFF */
    +#define TT_UCR_LAO                            (1UL << 25) /* U+0E80-U+0EFF */
       /* Bit 26   Georgian            */
       /*          Georgian Supplement */
    -#define TT_UCR_GEORGIAN                        (1L << 26) /* U+10A0-U+10FF */
    +#define TT_UCR_GEORGIAN                       (1UL << 26) /* U+10A0-U+10FF */
                                                               /* U+2D00-U+2D2F */
       /* Bit 27   Balinese */
    -#define TT_UCR_BALINESE                        (1L << 27) /* U+1B00-U+1B7F */
    +#define TT_UCR_BALINESE                       (1UL << 27) /* U+1B00-U+1B7F */
       /* Bit 28   Hangul Jamo */
    -#define TT_UCR_HANGUL_JAMO                     (1L << 28) /* U+1100-U+11FF */
    +#define TT_UCR_HANGUL_JAMO                    (1UL << 28) /* U+1100-U+11FF */
       /* Bit 29   Latin Extended Additional */
       /*          Latin Extended-C          */
       /*          Latin Extended-D          */
    -#define TT_UCR_LATIN_EXTENDED_ADDITIONAL       (1L << 29) /* U+1E00-U+1EFF */
    +#define TT_UCR_LATIN_EXTENDED_ADDITIONAL      (1UL << 29) /* U+1E00-U+1EFF */
                                                               /* U+2C60-U+2C7F */
                                                               /* U+A720-U+A7FF */
       /* Bit 30   Greek Extended */
    -#define TT_UCR_GREEK_EXTENDED                  (1L << 30) /* U+1F00-U+1FFF */
    +#define TT_UCR_GREEK_EXTENDED                 (1UL << 30) /* U+1F00-U+1FFF */
       /* Bit 31   General Punctuation      */
       /*          Supplemental Punctuation */
    -#define TT_UCR_GENERAL_PUNCTUATION             (1L << 31) /* U+2000-U+206F */
    +#define TT_UCR_GENERAL_PUNCTUATION            (1UL << 31) /* U+2000-U+206F */
                                                               /* U+2E00-U+2E7F */
     
       /* ulUnicodeRange2 */
       /* --------------- */
     
       /* Bit 32   Superscripts And Subscripts */
    -#define TT_UCR_SUPERSCRIPTS_SUBSCRIPTS         (1L <<  0) /* U+2070-U+209F */
    +#define TT_UCR_SUPERSCRIPTS_SUBSCRIPTS        (1UL <<  0) /* U+2070-U+209F */
       /* Bit 33   Currency Symbols */
    -#define TT_UCR_CURRENCY_SYMBOLS                (1L <<  1) /* U+20A0-U+20CF */
    +#define TT_UCR_CURRENCY_SYMBOLS               (1UL <<  1) /* U+20A0-U+20CF */
       /* Bit 34   Combining Diacritical Marks For Symbols */
     #define TT_UCR_COMBINING_DIACRITICAL_MARKS_SYMB \
    -                                               (1L <<  2) /* U+20D0-U+20FF */
    +                                              (1UL <<  2) /* U+20D0-U+20FF */
       /* Bit 35   Letterlike Symbols */
    -#define TT_UCR_LETTERLIKE_SYMBOLS              (1L <<  3) /* U+2100-U+214F */
    +#define TT_UCR_LETTERLIKE_SYMBOLS             (1UL <<  3) /* U+2100-U+214F */
       /* Bit 36   Number Forms */
    -#define TT_UCR_NUMBER_FORMS                    (1L <<  4) /* U+2150-U+218F */
    +#define TT_UCR_NUMBER_FORMS                   (1UL <<  4) /* U+2150-U+218F */
       /* Bit 37   Arrows                           */
       /*          Supplemental Arrows-A            */
       /*          Supplemental Arrows-B            */
       /*          Miscellaneous Symbols and Arrows */
    -#define TT_UCR_ARROWS                          (1L <<  5) /* U+2190-U+21FF */
    +#define TT_UCR_ARROWS                         (1UL <<  5) /* U+2190-U+21FF */
                                                               /* U+27F0-U+27FF */
                                                               /* U+2900-U+297F */
                                                               /* U+2B00-U+2BFF */
    @@ -961,52 +961,52 @@ FT_BEGIN_HEADER
       /*          Supplemental Mathematical Operators  */
       /*          Miscellaneous Mathematical Symbols-A */
       /*          Miscellaneous Mathematical Symbols-B */
    -#define TT_UCR_MATHEMATICAL_OPERATORS          (1L <<  6) /* U+2200-U+22FF */
    +#define TT_UCR_MATHEMATICAL_OPERATORS         (1UL <<  6) /* U+2200-U+22FF */
                                                               /* U+2A00-U+2AFF */
                                                               /* U+27C0-U+27EF */
                                                               /* U+2980-U+29FF */
       /* Bit 39 Miscellaneous Technical */
    -#define TT_UCR_MISCELLANEOUS_TECHNICAL         (1L <<  7) /* U+2300-U+23FF */
    +#define TT_UCR_MISCELLANEOUS_TECHNICAL        (1UL <<  7) /* U+2300-U+23FF */
       /* Bit 40   Control Pictures */
    -#define TT_UCR_CONTROL_PICTURES                (1L <<  8) /* U+2400-U+243F */
    +#define TT_UCR_CONTROL_PICTURES               (1UL <<  8) /* U+2400-U+243F */
       /* Bit 41   Optical Character Recognition */
    -#define TT_UCR_OCR                             (1L <<  9) /* U+2440-U+245F */
    +#define TT_UCR_OCR                            (1UL <<  9) /* U+2440-U+245F */
       /* Bit 42   Enclosed Alphanumerics */
    -#define TT_UCR_ENCLOSED_ALPHANUMERICS          (1L << 10) /* U+2460-U+24FF */
    +#define TT_UCR_ENCLOSED_ALPHANUMERICS         (1UL << 10) /* U+2460-U+24FF */
       /* Bit 43   Box Drawing */
    -#define TT_UCR_BOX_DRAWING                     (1L << 11) /* U+2500-U+257F */
    +#define TT_UCR_BOX_DRAWING                    (1UL << 11) /* U+2500-U+257F */
       /* Bit 44   Block Elements */
    -#define TT_UCR_BLOCK_ELEMENTS                  (1L << 12) /* U+2580-U+259F */
    +#define TT_UCR_BLOCK_ELEMENTS                 (1UL << 12) /* U+2580-U+259F */
       /* Bit 45   Geometric Shapes */
    -#define TT_UCR_GEOMETRIC_SHAPES                (1L << 13) /* U+25A0-U+25FF */
    +#define TT_UCR_GEOMETRIC_SHAPES               (1UL << 13) /* U+25A0-U+25FF */
       /* Bit 46   Miscellaneous Symbols */
    -#define TT_UCR_MISCELLANEOUS_SYMBOLS           (1L << 14) /* U+2600-U+26FF */
    +#define TT_UCR_MISCELLANEOUS_SYMBOLS          (1UL << 14) /* U+2600-U+26FF */
       /* Bit 47   Dingbats */
    -#define TT_UCR_DINGBATS                        (1L << 15) /* U+2700-U+27BF */
    +#define TT_UCR_DINGBATS                       (1UL << 15) /* U+2700-U+27BF */
       /* Bit 48   CJK Symbols and Punctuation */
    -#define TT_UCR_CJK_SYMBOLS                     (1L << 16) /* U+3000-U+303F */
    +#define TT_UCR_CJK_SYMBOLS                    (1UL << 16) /* U+3000-U+303F */
       /* Bit 49   Hiragana */
    -#define TT_UCR_HIRAGANA                        (1L << 17) /* U+3040-U+309F */
    +#define TT_UCR_HIRAGANA                       (1UL << 17) /* U+3040-U+309F */
       /* Bit 50   Katakana                     */
       /*          Katakana Phonetic Extensions */
    -#define TT_UCR_KATAKANA                        (1L << 18) /* U+30A0-U+30FF */
    +#define TT_UCR_KATAKANA                       (1UL << 18) /* U+30A0-U+30FF */
                                                               /* U+31F0-U+31FF */
       /* Bit 51   Bopomofo          */
       /*          Bopomofo Extended */
    -#define TT_UCR_BOPOMOFO                        (1L << 19) /* U+3100-U+312F */
    +#define TT_UCR_BOPOMOFO                       (1UL << 19) /* U+3100-U+312F */
                                                               /* U+31A0-U+31BF */
       /* Bit 52   Hangul Compatibility Jamo */
    -#define TT_UCR_HANGUL_COMPATIBILITY_JAMO       (1L << 20) /* U+3130-U+318F */
    +#define TT_UCR_HANGUL_COMPATIBILITY_JAMO      (1UL << 20) /* U+3130-U+318F */
       /* Bit 53   Phags-Pa */
    -#define TT_UCR_CJK_MISC                        (1L << 21) /* U+A840-U+A87F */
    -#define TT_UCR_KANBUN  TT_UCR_CJK_MISC /* deprecated */
    -#define TT_UCR_PHAGSPA
    +#define TT_UCR_PHAGSPA                        (1UL << 21) /* U+A840-U+A87F */
    +#define TT_UCR_KANBUN    TT_UCR_PHAGSPA  /* deprecated */
    +#define TT_UCR_CJK_MISC  TT_UCR_PHAGSPA  /* deprecated */
       /* Bit 54   Enclosed CJK Letters and Months */
    -#define TT_UCR_ENCLOSED_CJK_LETTERS_MONTHS     (1L << 22) /* U+3200-U+32FF */
    +#define TT_UCR_ENCLOSED_CJK_LETTERS_MONTHS    (1UL << 22) /* U+3200-U+32FF */
       /* Bit 55   CJK Compatibility */
    -#define TT_UCR_CJK_COMPATIBILITY               (1L << 23) /* U+3300-U+33FF */
    +#define TT_UCR_CJK_COMPATIBILITY              (1UL << 23) /* U+3300-U+33FF */
       /* Bit 56   Hangul Syllables */
    -#define TT_UCR_HANGUL                          (1L << 24) /* U+AC00-U+D7A3 */
    +#define TT_UCR_HANGUL                         (1UL << 24) /* U+AC00-U+D7A3 */
       /* Bit 57   High Surrogates              */
       /*          High Private Use Surrogates  */
       /*          Low Surrogates               */
    @@ -1017,12 +1017,12 @@ FT_BEGIN_HEADER
       /* Basic Multilingual Plane that is      */
       /* supported by this font.  So it really */
       /* means >= U+10000.                     */
    -#define TT_UCR_SURROGATES                      (1L << 25) /* U+D800-U+DB7F */
    +#define TT_UCR_SURROGATES                     (1UL << 25) /* U+D800-U+DB7F */
                                                               /* U+DB80-U+DBFF */
                                                               /* U+DC00-U+DFFF */
     #define TT_UCR_NON_PLANE_0  TT_UCR_SURROGATES
       /* Bit 58  Phoenician */
    -#define TT_UCR_PHOENICIAN                      (1L << 26) /*U+10900-U+1091F*/
    +#define TT_UCR_PHOENICIAN                     (1UL << 26) /*U+10900-U+1091F*/
       /* Bit 59   CJK Unified Ideographs             */
       /*          CJK Radicals Supplement            */
       /*          Kangxi Radicals                    */
    @@ -1030,7 +1030,7 @@ FT_BEGIN_HEADER
       /*          CJK Unified Ideographs Extension A */
       /*          CJK Unified Ideographs Extension B */
       /*          Kanbun                             */
    -#define TT_UCR_CJK_UNIFIED_IDEOGRAPHS          (1L << 27) /* U+4E00-U+9FFF */
    +#define TT_UCR_CJK_UNIFIED_IDEOGRAPHS         (1UL << 27) /* U+4E00-U+9FFF */
                                                               /* U+2E80-U+2EFF */
                                                               /* U+2F00-U+2FDF */
                                                               /* U+2FF0-U+2FFF */
    @@ -1038,178 +1038,178 @@ FT_BEGIN_HEADER
                                                               /*U+20000-U+2A6DF*/
                                                               /* U+3190-U+319F */
       /* Bit 60   Private Use */
    -#define TT_UCR_PRIVATE_USE                     (1L << 28) /* U+E000-U+F8FF */
    +#define TT_UCR_PRIVATE_USE                    (1UL << 28) /* U+E000-U+F8FF */
       /* Bit 61   CJK Strokes                             */
       /*          CJK Compatibility Ideographs            */
       /*          CJK Compatibility Ideographs Supplement */
    -#define TT_UCR_CJK_COMPATIBILITY_IDEOGRAPHS    (1L << 29) /* U+31C0-U+31EF */
    +#define TT_UCR_CJK_COMPATIBILITY_IDEOGRAPHS   (1UL << 29) /* U+31C0-U+31EF */
                                                               /* U+F900-U+FAFF */
                                                               /*U+2F800-U+2FA1F*/
       /* Bit 62   Alphabetic Presentation Forms */
    -#define TT_UCR_ALPHABETIC_PRESENTATION_FORMS   (1L << 30) /* U+FB00-U+FB4F */
    +#define TT_UCR_ALPHABETIC_PRESENTATION_FORMS  (1UL << 30) /* U+FB00-U+FB4F */
       /* Bit 63   Arabic Presentation Forms-A */
    -#define TT_UCR_ARABIC_PRESENTATION_FORMS_A     (1L << 31) /* U+FB50-U+FDFF */
    +#define TT_UCR_ARABIC_PRESENTATION_FORMS_A    (1UL << 31) /* U+FB50-U+FDFF */
     
       /* ulUnicodeRange3 */
       /* --------------- */
     
       /* Bit 64   Combining Half Marks */
    -#define TT_UCR_COMBINING_HALF_MARKS            (1L <<  0) /* U+FE20-U+FE2F */
    +#define TT_UCR_COMBINING_HALF_MARKS           (1UL <<  0) /* U+FE20-U+FE2F */
       /* Bit 65   Vertical forms          */
       /*          CJK Compatibility Forms */
    -#define TT_UCR_CJK_COMPATIBILITY_FORMS         (1L <<  1) /* U+FE10-U+FE1F */
    +#define TT_UCR_CJK_COMPATIBILITY_FORMS        (1UL <<  1) /* U+FE10-U+FE1F */
                                                               /* U+FE30-U+FE4F */
       /* Bit 66   Small Form Variants */
    -#define TT_UCR_SMALL_FORM_VARIANTS             (1L <<  2) /* U+FE50-U+FE6F */
    +#define TT_UCR_SMALL_FORM_VARIANTS            (1UL <<  2) /* U+FE50-U+FE6F */
       /* Bit 67   Arabic Presentation Forms-B */
    -#define TT_UCR_ARABIC_PRESENTATION_FORMS_B     (1L <<  3) /* U+FE70-U+FEFE */
    +#define TT_UCR_ARABIC_PRESENTATION_FORMS_B    (1UL <<  3) /* U+FE70-U+FEFF */
       /* Bit 68   Halfwidth and Fullwidth Forms */
    -#define TT_UCR_HALFWIDTH_FULLWIDTH_FORMS       (1L <<  4) /* U+FF00-U+FFEF */
    +#define TT_UCR_HALFWIDTH_FULLWIDTH_FORMS      (1UL <<  4) /* U+FF00-U+FFEF */
       /* Bit 69   Specials */
    -#define TT_UCR_SPECIALS                        (1L <<  5) /* U+FFF0-U+FFFD */
    +#define TT_UCR_SPECIALS                       (1UL <<  5) /* U+FFF0-U+FFFF */
       /* Bit 70   Tibetan */
    -#define TT_UCR_TIBETAN                         (1L <<  6) /* U+0F00-U+0FFF */
    +#define TT_UCR_TIBETAN                        (1UL <<  6) /* U+0F00-U+0FFF */
       /* Bit 71   Syriac */
    -#define TT_UCR_SYRIAC                          (1L <<  7) /* U+0700-U+074F */
    +#define TT_UCR_SYRIAC                         (1UL <<  7) /* U+0700-U+074F */
       /* Bit 72   Thaana */
    -#define TT_UCR_THAANA                          (1L <<  8) /* U+0780-U+07BF */
    +#define TT_UCR_THAANA                         (1UL <<  8) /* U+0780-U+07BF */
       /* Bit 73   Sinhala */
    -#define TT_UCR_SINHALA                         (1L <<  9) /* U+0D80-U+0DFF */
    +#define TT_UCR_SINHALA                        (1UL <<  9) /* U+0D80-U+0DFF */
       /* Bit 74   Myanmar */
    -#define TT_UCR_MYANMAR                         (1L << 10) /* U+1000-U+109F */
    +#define TT_UCR_MYANMAR                        (1UL << 10) /* U+1000-U+109F */
       /* Bit 75   Ethiopic            */
       /*          Ethiopic Supplement */
       /*          Ethiopic Extended   */
    -#define TT_UCR_ETHIOPIC                        (1L << 11) /* U+1200-U+137F */
    +#define TT_UCR_ETHIOPIC                       (1UL << 11) /* U+1200-U+137F */
                                                               /* U+1380-U+139F */
                                                               /* U+2D80-U+2DDF */
       /* Bit 76   Cherokee */
    -#define TT_UCR_CHEROKEE                        (1L << 12) /* U+13A0-U+13FF */
    +#define TT_UCR_CHEROKEE                       (1UL << 12) /* U+13A0-U+13FF */
       /* Bit 77   Unified Canadian Aboriginal Syllabics */
    -#define TT_UCR_CANADIAN_ABORIGINAL_SYLLABICS   (1L << 13) /* U+1400-U+167F */
    +#define TT_UCR_CANADIAN_ABORIGINAL_SYLLABICS  (1UL << 13) /* U+1400-U+167F */
       /* Bit 78   Ogham */
    -#define TT_UCR_OGHAM                           (1L << 14) /* U+1680-U+169F */
    +#define TT_UCR_OGHAM                          (1UL << 14) /* U+1680-U+169F */
       /* Bit 79   Runic */
    -#define TT_UCR_RUNIC                           (1L << 15) /* U+16A0-U+16FF */
    +#define TT_UCR_RUNIC                          (1UL << 15) /* U+16A0-U+16FF */
       /* Bit 80   Khmer         */
       /*          Khmer Symbols */
    -#define TT_UCR_KHMER                           (1L << 16) /* U+1780-U+17FF */
    +#define TT_UCR_KHMER                          (1UL << 16) /* U+1780-U+17FF */
                                                               /* U+19E0-U+19FF */
       /* Bit 81   Mongolian */
    -#define TT_UCR_MONGOLIAN                       (1L << 17) /* U+1800-U+18AF */
    +#define TT_UCR_MONGOLIAN                      (1UL << 17) /* U+1800-U+18AF */
       /* Bit 82   Braille Patterns */
    -#define TT_UCR_BRAILLE                         (1L << 18) /* U+2800-U+28FF */
    +#define TT_UCR_BRAILLE                        (1UL << 18) /* U+2800-U+28FF */
       /* Bit 83   Yi Syllables */
       /*          Yi Radicals  */
    -#define TT_UCR_YI                              (1L << 19) /* U+A000-U+A48F */
    +#define TT_UCR_YI                             (1UL << 19) /* U+A000-U+A48F */
                                                               /* U+A490-U+A4CF */
       /* Bit 84   Tagalog  */
       /*          Hanunoo  */
       /*          Buhid    */
       /*          Tagbanwa */
    -#define TT_UCR_PHILIPPINE                      (1L << 20) /* U+1700-U+171F */
    +#define TT_UCR_PHILIPPINE                     (1UL << 20) /* U+1700-U+171F */
                                                               /* U+1720-U+173F */
                                                               /* U+1740-U+175F */
                                                               /* U+1760-U+177F */
       /* Bit 85   Old Italic */
    -#define TT_UCR_OLD_ITALIC                      (1L << 21) /*U+10300-U+1032F*/
    +#define TT_UCR_OLD_ITALIC                     (1UL << 21) /*U+10300-U+1032F*/
       /* Bit 86   Gothic */
    -#define TT_UCR_GOTHIC                          (1L << 22) /*U+10330-U+1034F*/
    +#define TT_UCR_GOTHIC                         (1UL << 22) /*U+10330-U+1034F*/
       /* Bit 87   Deseret */
    -#define TT_UCR_DESERET                         (1L << 23) /*U+10400-U+1044F*/
    +#define TT_UCR_DESERET                        (1UL << 23) /*U+10400-U+1044F*/
       /* Bit 88   Byzantine Musical Symbols      */
       /*          Musical Symbols                */
       /*          Ancient Greek Musical Notation */
    -#define TT_UCR_MUSICAL_SYMBOLS                 (1L << 24) /*U+1D000-U+1D0FF*/
    +#define TT_UCR_MUSICAL_SYMBOLS                (1UL << 24) /*U+1D000-U+1D0FF*/
                                                               /*U+1D100-U+1D1FF*/
                                                               /*U+1D200-U+1D24F*/
       /* Bit 89   Mathematical Alphanumeric Symbols */
    -#define TT_UCR_MATH_ALPHANUMERIC_SYMBOLS       (1L << 25) /*U+1D400-U+1D7FF*/
    +#define TT_UCR_MATH_ALPHANUMERIC_SYMBOLS      (1UL << 25) /*U+1D400-U+1D7FF*/
       /* Bit 90   Private Use (plane 15) */
       /*          Private Use (plane 16) */
    -#define TT_UCR_PRIVATE_USE_SUPPLEMENTARY       (1L << 26) /*U+F0000-U+FFFFD*/
    +#define TT_UCR_PRIVATE_USE_SUPPLEMENTARY      (1UL << 26) /*U+F0000-U+FFFFD*/
                                                             /*U+100000-U+10FFFD*/
       /* Bit 91   Variation Selectors            */
       /*          Variation Selectors Supplement */
    -#define TT_UCR_VARIATION_SELECTORS             (1L << 27) /* U+FE00-U+FE0F */
    +#define TT_UCR_VARIATION_SELECTORS            (1UL << 27) /* U+FE00-U+FE0F */
                                                               /*U+E0100-U+E01EF*/
       /* Bit 92   Tags */
    -#define TT_UCR_TAGS                            (1L << 28) /*U+E0000-U+E007F*/
    +#define TT_UCR_TAGS                           (1UL << 28) /*U+E0000-U+E007F*/
       /* Bit 93   Limbu */
    -#define TT_UCR_LIMBU                           (1L << 29) /* U+1900-U+194F */
    +#define TT_UCR_LIMBU                          (1UL << 29) /* U+1900-U+194F */
       /* Bit 94   Tai Le */
    -#define TT_UCR_TAI_LE                          (1L << 30) /* U+1950-U+197F */
    +#define TT_UCR_TAI_LE                         (1UL << 30) /* U+1950-U+197F */
       /* Bit 95   New Tai Lue */
    -#define TT_UCR_NEW_TAI_LUE                     (1L << 31) /* U+1980-U+19DF */
    +#define TT_UCR_NEW_TAI_LUE                    (1UL << 31) /* U+1980-U+19DF */
     
       /* ulUnicodeRange4 */
       /* --------------- */
     
       /* Bit 96   Buginese */
    -#define TT_UCR_BUGINESE                        (1L <<  0) /* U+1A00-U+1A1F */
    +#define TT_UCR_BUGINESE                       (1UL <<  0) /* U+1A00-U+1A1F */
       /* Bit 97   Glagolitic */
    -#define TT_UCR_GLAGOLITIC                      (1L <<  1) /* U+2C00-U+2C5F */
    +#define TT_UCR_GLAGOLITIC                     (1UL <<  1) /* U+2C00-U+2C5F */
       /* Bit 98   Tifinagh */
    -#define TT_UCR_TIFINAGH                        (1L <<  2) /* U+2D30-U+2D7F */
    +#define TT_UCR_TIFINAGH                       (1UL <<  2) /* U+2D30-U+2D7F */
       /* Bit 99   Yijing Hexagram Symbols */
    -#define TT_UCR_YIJING                          (1L <<  3) /* U+4DC0-U+4DFF */
    +#define TT_UCR_YIJING                         (1UL <<  3) /* U+4DC0-U+4DFF */
       /* Bit 100  Syloti Nagri */
    -#define TT_UCR_SYLOTI_NAGRI                    (1L <<  4) /* U+A800-U+A82F */
    +#define TT_UCR_SYLOTI_NAGRI                   (1UL <<  4) /* U+A800-U+A82F */
       /* Bit 101  Linear B Syllabary */
       /*          Linear B Ideograms */
       /*          Aegean Numbers     */
    -#define TT_UCR_LINEAR_B                        (1L <<  5) /*U+10000-U+1007F*/
    +#define TT_UCR_LINEAR_B                       (1UL <<  5) /*U+10000-U+1007F*/
                                                               /*U+10080-U+100FF*/
                                                               /*U+10100-U+1013F*/
       /* Bit 102  Ancient Greek Numbers */
    -#define TT_UCR_ANCIENT_GREEK_NUMBERS           (1L <<  6) /*U+10140-U+1018F*/
    +#define TT_UCR_ANCIENT_GREEK_NUMBERS          (1UL <<  6) /*U+10140-U+1018F*/
       /* Bit 103  Ugaritic */
    -#define TT_UCR_UGARITIC                        (1L <<  7) /*U+10380-U+1039F*/
    +#define TT_UCR_UGARITIC                       (1UL <<  7) /*U+10380-U+1039F*/
       /* Bit 104  Old Persian */
    -#define TT_UCR_OLD_PERSIAN                     (1L <<  8) /*U+103A0-U+103DF*/
    +#define TT_UCR_OLD_PERSIAN                    (1UL <<  8) /*U+103A0-U+103DF*/
       /* Bit 105  Shavian */
    -#define TT_UCR_SHAVIAN                         (1L <<  9) /*U+10450-U+1047F*/
    +#define TT_UCR_SHAVIAN                        (1UL <<  9) /*U+10450-U+1047F*/
       /* Bit 106  Osmanya */
    -#define TT_UCR_OSMANYA                         (1L << 10) /*U+10480-U+104AF*/
    +#define TT_UCR_OSMANYA                        (1UL << 10) /*U+10480-U+104AF*/
       /* Bit 107  Cypriot Syllabary */
    -#define TT_UCR_CYPRIOT_SYLLABARY               (1L << 11) /*U+10800-U+1083F*/
    +#define TT_UCR_CYPRIOT_SYLLABARY              (1UL << 11) /*U+10800-U+1083F*/
       /* Bit 108  Kharoshthi */
    -#define TT_UCR_KHAROSHTHI                      (1L << 12) /*U+10A00-U+10A5F*/
    +#define TT_UCR_KHAROSHTHI                     (1UL << 12) /*U+10A00-U+10A5F*/
       /* Bit 109  Tai Xuan Jing Symbols */
    -#define TT_UCR_TAI_XUAN_JING                   (1L << 13) /*U+1D300-U+1D35F*/
    +#define TT_UCR_TAI_XUAN_JING                  (1UL << 13) /*U+1D300-U+1D35F*/
       /* Bit 110  Cuneiform                         */
       /*          Cuneiform Numbers and Punctuation */
    -#define TT_UCR_CUNEIFORM                       (1L << 14) /*U+12000-U+123FF*/
    +#define TT_UCR_CUNEIFORM                      (1UL << 14) /*U+12000-U+123FF*/
                                                               /*U+12400-U+1247F*/
       /* Bit 111  Counting Rod Numerals */
    -#define TT_UCR_COUNTING_ROD_NUMERALS           (1L << 15) /*U+1D360-U+1D37F*/
    +#define TT_UCR_COUNTING_ROD_NUMERALS          (1UL << 15) /*U+1D360-U+1D37F*/
       /* Bit 112  Sundanese */
    -#define TT_UCR_SUNDANESE                       (1L << 16) /* U+1B80-U+1BBF */
    +#define TT_UCR_SUNDANESE                      (1UL << 16) /* U+1B80-U+1BBF */
       /* Bit 113  Lepcha */
    -#define TT_UCR_LEPCHA                          (1L << 17) /* U+1C00-U+1C4F */
    +#define TT_UCR_LEPCHA                         (1UL << 17) /* U+1C00-U+1C4F */
       /* Bit 114  Ol Chiki */
    -#define TT_UCR_OL_CHIKI                        (1L << 18) /* U+1C50-U+1C7F */
    +#define TT_UCR_OL_CHIKI                       (1UL << 18) /* U+1C50-U+1C7F */
       /* Bit 115  Saurashtra */
    -#define TT_UCR_SAURASHTRA                      (1L << 19) /* U+A880-U+A8DF */
    +#define TT_UCR_SAURASHTRA                     (1UL << 19) /* U+A880-U+A8DF */
       /* Bit 116  Kayah Li */
    -#define TT_UCR_KAYAH_LI                        (1L << 20) /* U+A900-U+A92F */
    +#define TT_UCR_KAYAH_LI                       (1UL << 20) /* U+A900-U+A92F */
       /* Bit 117  Rejang */
    -#define TT_UCR_REJANG                          (1L << 21) /* U+A930-U+A95F */
    +#define TT_UCR_REJANG                         (1UL << 21) /* U+A930-U+A95F */
       /* Bit 118  Cham */
    -#define TT_UCR_CHAM                            (1L << 22) /* U+AA00-U+AA5F */
    +#define TT_UCR_CHAM                           (1UL << 22) /* U+AA00-U+AA5F */
       /* Bit 119  Ancient Symbols */
    -#define TT_UCR_ANCIENT_SYMBOLS                 (1L << 23) /*U+10190-U+101CF*/
    +#define TT_UCR_ANCIENT_SYMBOLS                (1UL << 23) /*U+10190-U+101CF*/
       /* Bit 120  Phaistos Disc */
    -#define TT_UCR_PHAISTOS_DISC                   (1L << 24) /*U+101D0-U+101FF*/
    +#define TT_UCR_PHAISTOS_DISC                  (1UL << 24) /*U+101D0-U+101FF*/
       /* Bit 121  Carian */
       /*          Lycian */
       /*          Lydian */
    -#define TT_UCR_OLD_ANATOLIAN                   (1L << 25) /*U+102A0-U+102DF*/
    +#define TT_UCR_OLD_ANATOLIAN                  (1UL << 25) /*U+102A0-U+102DF*/
                                                               /*U+10280-U+1029F*/
                                                               /*U+10920-U+1093F*/
       /* Bit 122  Domino Tiles  */
       /*          Mahjong Tiles */
    -#define TT_UCR_GAME_TILES                      (1L << 26) /*U+1F030-U+1F09F*/
    +#define TT_UCR_GAME_TILES                     (1UL << 26) /*U+1F030-U+1F09F*/
                                                               /*U+1F000-U+1F02F*/
       /* Bit 123-127 Reserved for process-internal usage */
     
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/tttables.h b/src/java.desktop/share/native/libfreetype/include/freetype/tttables.h
    index 2cf0ff1bc61..aa4336435d9 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/tttables.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/tttables.h
    @@ -5,7 +5,7 @@
      *   Basic SFNT/TrueType tables definitions and interface
      *   (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -192,7 +192,7 @@ FT_BEGIN_HEADER
        *     A pointer into the 'hmtx' table.
        *
        * @note:
    -   *   For an OpenType variation font, the values of the following fields can
    +   *   For OpenType Font Variations, the values of the following fields can
        *   change after a call to @FT_Set_Var_Design_Coordinates (and friends) if
        *   the font contains an 'MVAR' table: `caret_Slope_Rise`,
        *   `caret_Slope_Run`, and `caret_Offset`.
    @@ -310,7 +310,7 @@ FT_BEGIN_HEADER
        *     A pointer into the 'vmtx' table.
        *
        * @note:
    -   *   For an OpenType variation font, the values of the following fields can
    +   *   For OpenType Font Variations, the values of the following fields can
        *   change after a call to @FT_Set_Var_Design_Coordinates (and friends) if
        *   the font contains an 'MVAR' table: `Ascender`, `Descender`,
        *   `Line_Gap`, `caret_Slope_Rise`, `caret_Slope_Run`, and `caret_Offset`.
    @@ -359,7 +359,7 @@ FT_BEGIN_HEADER
        *   table.  In this case, the `version` field is always set to 0xFFFF.
        *
        * @note:
    -   *   For an OpenType variation font, the values of the following fields can
    +   *   For OpenType Font Variations, the values of the following fields can
        *   change after a call to @FT_Set_Var_Design_Coordinates (and friends) if
        *   the font contains an 'MVAR' table: `sCapHeight`, `sTypoAscender`,
        *   `sTypoDescender`, `sTypoLineGap`, `sxHeight`, `usWinAscent`,
    @@ -442,7 +442,7 @@ FT_BEGIN_HEADER
        *   them.
        *
        * @note:
    -   *   For an OpenType variation font, the values of the following fields can
    +   *   For OpenType Font Variations, the values of the following fields can
        *   change after a call to @FT_Set_Var_Design_Coordinates (and friends) if
        *   the font contains an 'MVAR' table: `underlinePosition` and
        *   `underlineThickness`.
    @@ -705,6 +705,9 @@ FT_BEGIN_HEADER
        *     definitions found in the @FT_TRUETYPE_TAGS_H file, or forge a new
        *     one with @FT_MAKE_TAG.
        *
    +   *     [Since 2.14] Use value~1 if you want to access the table directory
    +   *     of the (currently selected) font.
    +   *
        *   offset ::
        *     The starting offset in the table (or file if tag~==~0).
        *
    diff --git a/src/java.desktop/share/native/libfreetype/include/freetype/tttags.h b/src/java.desktop/share/native/libfreetype/include/freetype/tttags.h
    index da0af5d3f23..56bb0a3ee5e 100644
    --- a/src/java.desktop/share/native/libfreetype/include/freetype/tttags.h
    +++ b/src/java.desktop/share/native/libfreetype/include/freetype/tttags.h
    @@ -4,7 +4,7 @@
      *
      *   Tags for TrueType and OpenType tables (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/include/ft2build.h b/src/java.desktop/share/native/libfreetype/include/ft2build.h
    index d3d7685039c..3008aea7cf5 100644
    --- a/src/java.desktop/share/native/libfreetype/include/ft2build.h
    +++ b/src/java.desktop/share/native/libfreetype/include/ft2build.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType 2 build and setup macros.
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afadjust.c b/src/java.desktop/share/native/libfreetype/src/autofit/afadjust.c
    new file mode 100644
    index 00000000000..a1aa45914d0
    --- /dev/null
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afadjust.c
    @@ -0,0 +1,1621 @@
    +/****************************************************************************
    + *
    + * afadjust.c
    + *
    + *   Auto-fitter routines to adjust components based on charcode (body).
    + *
    + * Copyright (C) 2023-2025 by
    + * David Turner, Robert Wilhelm, and Werner Lemberg.
    + *
    + * Written by Craig White .
    + *
    + * This file is part of the FreeType project, and may only be used,
    + * modified, and distributed under the terms of the FreeType project
    + * license, LICENSE.TXT.  By continuing to use, modify, or distribute
    + * this file you indicate that you have read the license and
    + * understand and accept it fully.
    + *
    + */
    +
    +#include "afadjust.h"
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +#  include "afgsub.h"
    +#endif
    +
    +#include 
    +#include 
    +#include 
    +#include 
    +
    +#define AF_ADJUSTMENT_DATABASE_LENGTH           \
    +          ( sizeof ( adjustment_database ) /    \
    +            sizeof ( adjustment_database[0] ) )
    +
    +#undef  FT_COMPONENT
    +#define FT_COMPONENT  afadjust
    +
    +
    +  typedef struct  AF_AdjustmentDatabaseEntry_
    +  {
    +    FT_UInt32  codepoint;
    +    FT_UInt32  flags;
    +
    +  } AF_AdjustmentDatabaseEntry;
    +
    +
    +  /*
    +    All entries in this list must be sorted by ascending Unicode code
    +    points.  The table entries are 3 numbers consisting of:
    +
    +    - Unicode code point.
    +    - The vertical adjustment type.  This should be a combination of the
    +      AF_ADJUST_XXX and AF_IGNORE_XXX macros.
    +  */
    +  static AF_AdjustmentDatabaseEntry  adjustment_database[] =
    +  {
    +    /* C0 Controls and Basic Latin */
    +    { 0x21,  AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ! */
    +    { 0x51,  AF_IGNORE_CAPITAL_BOTTOM } , /* Q */
    +    { 0x3F,  AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ? */
    +    { 0x69,  AF_ADJUST_UP }, /* i */
    +    { 0x6A,  AF_ADJUST_UP }, /* j */
    +#if 0
    +    /* XXX TODO */
    +    { 0x7E,  AF_ADJUST_TILDE_TOP }, /* ~ */
    +#endif
    +
    +    /* C1 Controls and Latin-1 Supplement */
    +    { 0xA1,  AF_ADJUST_UP }, /* ¡ */
    +    { 0xA6,  AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ¦ */
    +    { 0xAA,  AF_ADJUST_UP }, /* ª */
    +    { 0xBA,  AF_ADJUST_UP }, /* º */
    +    { 0xBF,  AF_ADJUST_UP }, /* ¿ */
    +
    +    { 0xC0,  AF_ADJUST_UP }, /* À */
    +    { 0xC1,  AF_ADJUST_UP }, /* Á */
    +    { 0xC2,  AF_ADJUST_UP }, /* Â */
    +    { 0xC3,  AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ã */
    +    { 0xC4,  AF_ADJUST_UP }, /* Ä */
    +    { 0xC5,  AF_ADJUST_UP }, /* Å */
    +    { 0xC7,  AF_IGNORE_CAPITAL_BOTTOM }, /* Ç */
    +    { 0xC8,  AF_ADJUST_UP }, /* È */
    +    { 0xC9,  AF_ADJUST_UP }, /* É */
    +    { 0xCA,  AF_ADJUST_UP }, /* Ê */
    +    { 0xCB,  AF_ADJUST_UP }, /* Ë */
    +    { 0xCC,  AF_ADJUST_UP }, /* Ì */
    +    { 0xCD,  AF_ADJUST_UP }, /* Í */
    +    { 0xCE,  AF_ADJUST_UP }, /* Î */
    +    { 0xCF,  AF_ADJUST_UP }, /* Ï */
    +
    +    { 0xD1,  AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ñ */
    +    { 0xD2,  AF_ADJUST_UP }, /* Ò */
    +    { 0xD3,  AF_ADJUST_UP }, /* Ó */
    +    { 0xD4,  AF_ADJUST_UP }, /* Ô */
    +    { 0xD5,  AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Õ */
    +    { 0xD6,  AF_ADJUST_UP }, /* Ö */
    +    { 0xD8,  AF_IGNORE_CAPITAL_TOP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ø */
    +    { 0xD9,  AF_ADJUST_UP }, /* Ù */
    +    { 0xDA,  AF_ADJUST_UP }, /* Ú */
    +    { 0xDB,  AF_ADJUST_UP }, /* Û */
    +    { 0xDC,  AF_ADJUST_UP }, /* Ü */
    +    { 0xDD,  AF_ADJUST_UP }, /* Ý */
    +
    +    { 0xE0,  AF_ADJUST_UP }, /* à */
    +    { 0xE1,  AF_ADJUST_UP }, /* á */
    +    { 0xE2,  AF_ADJUST_UP }, /* â */
    +    { 0xE3,  AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ã */
    +    { 0xE4,  AF_ADJUST_UP }, /* ä */
    +    { 0xE5,  AF_ADJUST_UP }, /* å */
    +    { 0xE7,  AF_IGNORE_SMALL_BOTTOM }, /* ç */
    +    { 0xE8,  AF_ADJUST_UP }, /* è */
    +    { 0xE9,  AF_ADJUST_UP }, /* é */
    +    { 0xEA,  AF_ADJUST_UP }, /* ê */
    +    { 0xEB,  AF_ADJUST_UP }, /* ë */
    +    { 0xEC,  AF_ADJUST_UP }, /* ì */
    +    { 0xED,  AF_ADJUST_UP }, /* í */
    +    { 0xEE,  AF_ADJUST_UP }, /* î */
    +    { 0xEF,  AF_ADJUST_UP }, /* ï */
    +
    +    { 0xF1,  AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ñ */
    +    { 0xF2,  AF_ADJUST_UP }, /* ò */
    +    { 0xF3,  AF_ADJUST_UP }, /* ó */
    +    { 0xF4,  AF_ADJUST_UP }, /* ô */
    +    { 0xF5,  AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* õ */
    +    { 0xF6,  AF_ADJUST_UP }, /* ö */
    +    { 0xF8,  AF_IGNORE_SMALL_TOP | AF_IGNORE_SMALL_BOTTOM }, /* ø */
    +    { 0xF9,  AF_ADJUST_UP }, /* ù */
    +    { 0xFA,  AF_ADJUST_UP }, /* ú */
    +    { 0xFB,  AF_ADJUST_UP }, /* û */
    +    { 0xFC,  AF_ADJUST_UP }, /* ü */
    +    { 0xFD,  AF_ADJUST_UP }, /* ý */
    +    { 0xFF,  AF_ADJUST_UP }, /* ÿ */
    +
    +    /* Latin Extended-A */
    +    { 0x100, AF_ADJUST_UP }, /* Ā */
    +    { 0x101, AF_ADJUST_UP }, /* ā */
    +    { 0x102, AF_ADJUST_UP }, /* Ă */
    +    { 0x103, AF_ADJUST_UP }, /* ă */
    +    { 0x104, AF_IGNORE_CAPITAL_BOTTOM }, /* Ą */
    +    { 0x105, AF_IGNORE_SMALL_BOTTOM }, /* ą */
    +    { 0x106, AF_ADJUST_UP }, /* Ć */
    +    { 0x107, AF_ADJUST_UP }, /* ć */
    +    { 0x108, AF_ADJUST_UP }, /* Ĉ */
    +    { 0x109, AF_ADJUST_UP }, /* ĉ */
    +    { 0x10A, AF_ADJUST_UP }, /* Ċ */
    +    { 0x10B, AF_ADJUST_UP }, /* ċ */
    +    { 0x10C, AF_ADJUST_UP }, /* Č */
    +    { 0x10D, AF_ADJUST_UP }, /* č */
    +    { 0x10E, AF_ADJUST_UP }, /* Ď */
    +
    +    { 0x112, AF_ADJUST_UP }, /* Ē */
    +    { 0x113, AF_ADJUST_UP }, /* ē */
    +    { 0x114, AF_ADJUST_UP }, /* Ĕ */
    +    { 0x115, AF_ADJUST_UP }, /* ĕ */
    +    { 0x116, AF_ADJUST_UP }, /* Ė */
    +    { 0x117, AF_ADJUST_UP }, /* ė */
    +    { 0x118, AF_IGNORE_CAPITAL_BOTTOM }, /* Ę */
    +    { 0x119, AF_IGNORE_SMALL_BOTTOM }, /* ę */
    +    { 0x11A, AF_ADJUST_UP }, /* Ě */
    +    { 0x11B, AF_ADJUST_UP }, /* ě */
    +    { 0x11C, AF_ADJUST_UP }, /* Ĝ */
    +    { 0x11D, AF_ADJUST_UP }, /* ĝ */
    +    { 0x11E, AF_ADJUST_UP }, /* Ğ */
    +    { 0x11F, AF_ADJUST_UP }, /* ğ */
    +
    +    { 0x120, AF_ADJUST_UP }, /* Ġ */
    +    { 0x121, AF_ADJUST_UP }, /* ġ */
    +    { 0x122, AF_ADJUST_DOWN }, /* Ģ */
    +    { 0x123, AF_ADJUST_UP }, /* ģ */
    +    { 0x124, AF_ADJUST_UP }, /* Ĥ */
    +    { 0x125, AF_ADJUST_UP }, /* ĥ */
    +    { 0x128, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ĩ */
    +    { 0x129, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ĩ */
    +    { 0x12A, AF_ADJUST_UP }, /* Ī */
    +    { 0x12B, AF_ADJUST_UP }, /* ī */
    +    { 0x12C, AF_ADJUST_UP }, /* Ĭ */
    +    { 0x12D, AF_ADJUST_UP }, /* ĭ */
    +    { 0x12E, AF_IGNORE_CAPITAL_BOTTOM }, /* Į */
    +    { 0x12F, AF_ADJUST_UP | AF_IGNORE_SMALL_BOTTOM }, /* į */
    +
    +    { 0x130, AF_ADJUST_UP }, /* İ */
    +    { 0x133, AF_ADJUST_UP }, /* ij */
    +    { 0x134, AF_ADJUST_UP }, /* Ĵ */
    +    { 0x135, AF_ADJUST_UP }, /* ĵ */
    +    { 0x136, AF_ADJUST_DOWN }, /* Ķ */
    +    { 0x137, AF_ADJUST_DOWN }, /* ķ */
    +    { 0x139, AF_ADJUST_UP }, /* Ĺ */
    +    { 0x13A, AF_ADJUST_UP }, /* ĺ */
    +    { 0x13B, AF_ADJUST_DOWN }, /* Ļ */
    +    { 0x13C, AF_ADJUST_DOWN }, /* ļ */
    +
    +    { 0x143, AF_ADJUST_UP }, /* Ń */
    +    { 0x144, AF_ADJUST_UP }, /* ń */
    +    { 0x145, AF_ADJUST_DOWN }, /* Ņ */
    +    { 0x146, AF_ADJUST_DOWN }, /* ņ */
    +    { 0x147, AF_ADJUST_UP }, /* Ň */
    +    { 0x148, AF_ADJUST_UP }, /* ň */
    +    { 0x14C, AF_ADJUST_UP }, /* Ō */
    +    { 0x14D, AF_ADJUST_UP }, /* ō */
    +    { 0x14E, AF_ADJUST_UP }, /* Ŏ */
    +    { 0x14F, AF_ADJUST_UP }, /* ŏ */
    +
    +    { 0x150, AF_ADJUST_UP }, /* Ő */
    +    { 0x151, AF_ADJUST_UP }, /* ő */
    +    { 0x154, AF_ADJUST_UP }, /* Ŕ */
    +    { 0x155, AF_ADJUST_UP }, /* ŕ */
    +    { 0x156, AF_ADJUST_DOWN }, /* Ŗ */
    +    { 0x157, AF_ADJUST_DOWN }, /* ŗ */
    +    { 0x158, AF_ADJUST_UP }, /* Ř */
    +    { 0x159, AF_ADJUST_UP }, /* ř */
    +    { 0x15A, AF_ADJUST_UP }, /* Ś */
    +    { 0x15B, AF_ADJUST_UP }, /* ś */
    +    { 0x15C, AF_ADJUST_UP }, /* Ŝ */
    +    { 0x15D, AF_ADJUST_UP }, /* ŝ */
    +    { 0x15E, AF_IGNORE_CAPITAL_BOTTOM }, /* Ş */
    +    { 0x15F, AF_IGNORE_SMALL_BOTTOM }, /* ş */
    +
    +    { 0x160, AF_ADJUST_UP }, /* Š */
    +    { 0x161, AF_ADJUST_UP }, /* š */
    +    { 0x162, AF_IGNORE_CAPITAL_BOTTOM }, /* Ţ */
    +    { 0x163, AF_IGNORE_SMALL_BOTTOM }, /* ţ */
    +    { 0x164, AF_ADJUST_UP }, /* Ť */
    +    { 0x168, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ũ */
    +    { 0x169, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ũ */
    +    { 0x16A, AF_ADJUST_UP }, /* Ū */
    +    { 0x16B, AF_ADJUST_UP }, /* ū */
    +    { 0x16C, AF_ADJUST_UP }, /* Ŭ */
    +    { 0x16D, AF_ADJUST_UP }, /* ŭ */
    +    { 0x16E, AF_ADJUST_UP }, /* Ů */
    +    { 0x16F, AF_ADJUST_UP }, /* ů */
    +
    +    { 0x170, AF_ADJUST_UP }, /* Ű */
    +    { 0x171, AF_ADJUST_UP }, /* ű */
    +    { 0x172, AF_IGNORE_CAPITAL_BOTTOM }, /* Ų */
    +    { 0x173, AF_IGNORE_SMALL_BOTTOM }, /* ų */
    +    { 0x174, AF_ADJUST_UP }, /* Ŵ */
    +    { 0x175, AF_ADJUST_UP }, /* ŵ */
    +    { 0x176, AF_ADJUST_UP }, /* Ŷ */
    +    { 0x177, AF_ADJUST_UP }, /* ŷ */
    +    { 0x178, AF_ADJUST_UP }, /* Ÿ */
    +    { 0x179, AF_ADJUST_UP }, /* Ź */
    +    { 0x17A, AF_ADJUST_UP }, /* ź */
    +    { 0x17B, AF_ADJUST_UP }, /* Ż */
    +    { 0x17C, AF_ADJUST_UP }, /* ż */
    +    { 0x17D, AF_ADJUST_UP }, /* Ž */
    +    { 0x17E, AF_ADJUST_UP }, /* ž */
    +
    +    /* Latin Extended-B */
    +    { 0x187, AF_IGNORE_CAPITAL_TOP }, /* Ƈ */
    +    { 0x188, AF_IGNORE_SMALL_TOP }, /* ƈ */
    +
    +    { 0x1A0, AF_IGNORE_CAPITAL_TOP }, /* Ơ */
    +    { 0x1A1, AF_IGNORE_SMALL_TOP }, /* ơ */
    +    { 0x1A5, AF_IGNORE_SMALL_TOP }, /* ƥ */
    +    { 0x1AB, AF_IGNORE_SMALL_BOTTOM }, /* ƫ */
    +    { 0x1AE, AF_IGNORE_CAPITAL_BOTTOM }, /* Ʈ */
    +    { 0x1AF, AF_IGNORE_CAPITAL_TOP }, /* Ư */
    +
    +    { 0x1B0, AF_IGNORE_SMALL_TOP }, /* ư */
    +    { 0x1B4, AF_IGNORE_SMALL_TOP }, /* ƴ */
    +
    +    { 0x1C3, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ǃ */
    +    { 0x1C4, AF_ADJUST_UP }, /* DŽ */
    +#if 0
    +    { 0x1C5, AF_ADJUST_UP }, /* Dž */
    +    { 0x1C6, AF_ADJUST_UP }, /* dž */
    +    { 0x1C8, AF_ADJUST_UP }, /* Lj */
    +    { 0x1C9, AF_ADJUST_UP }, /* lj */
    +    { 0x1CB, AF_ADJUST_UP }, /* Nj */
    +#endif
    +    { 0x1CC, AF_ADJUST_UP }, /* nj */
    +    { 0x1CD, AF_ADJUST_UP }, /* Ǎ */
    +    { 0x1CE, AF_ADJUST_UP }, /* ǎ */
    +    { 0x1CF, AF_ADJUST_UP }, /* Ǐ */
    +
    +    { 0x1D0, AF_ADJUST_UP }, /* ǐ */
    +    { 0x1D1, AF_ADJUST_UP }, /* Ǒ */
    +    { 0x1D2, AF_ADJUST_UP }, /* ǒ */
    +    { 0x1D3, AF_ADJUST_UP }, /* Ǔ */
    +    { 0x1D4, AF_ADJUST_UP }, /* ǔ */
    +    { 0x1D5, AF_ADJUST_UP2 }, /* Ǖ */
    +    { 0x1D6, AF_ADJUST_UP2 }, /* ǖ */
    +    { 0x1D7, AF_ADJUST_UP2 }, /* Ǘ */
    +    { 0x1D8, AF_ADJUST_UP2 }, /* ǘ */
    +    { 0x1D9, AF_ADJUST_UP2 }, /* Ǚ */
    +    { 0x1DA, AF_ADJUST_UP2 }, /* ǚ */
    +    { 0x1DB, AF_ADJUST_UP2 }, /* Ǜ */
    +    { 0x1DC, AF_ADJUST_UP2 }, /* ǜ */
    +    { 0x1DE, AF_ADJUST_UP2 }, /* Ǟ */
    +    { 0x1DF, AF_ADJUST_UP2 }, /* ǟ */
    +
    +    { 0x1E0, AF_ADJUST_UP2 }, /* Ǡ */
    +    { 0x1E1, AF_ADJUST_UP2 }, /* ǡ */
    +    { 0x1E2, AF_ADJUST_UP }, /* Ǣ */
    +    { 0x1E3, AF_ADJUST_UP }, /* ǣ */
    +    { 0x1E6, AF_ADJUST_UP }, /* Ǧ */
    +    { 0x1E7, AF_ADJUST_UP }, /* ǧ */
    +    { 0x1E8, AF_ADJUST_UP }, /* Ǩ */
    +    { 0x1E9, AF_ADJUST_UP }, /* ǩ */
    +    { 0x1EA, AF_IGNORE_CAPITAL_BOTTOM }, /* Ǫ */
    +    { 0x1EB, AF_IGNORE_SMALL_BOTTOM }, /* ǫ */
    +    { 0x1EC, AF_ADJUST_UP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ǭ */
    +    { 0x1ED, AF_ADJUST_UP | AF_IGNORE_SMALL_BOTTOM }, /* ǭ */
    +    { 0x1EE, AF_ADJUST_UP }, /* Ǯ */
    +    { 0x1EF, AF_ADJUST_UP }, /* ǯ */
    +
    +    { 0x1F0, AF_ADJUST_UP }, /* ǰ */
    +    { 0x1F4, AF_ADJUST_UP }, /* Ǵ */
    +    { 0x1F5, AF_ADJUST_UP }, /* ǵ */
    +    { 0x1F8, AF_ADJUST_UP }, /* Ǹ */
    +    { 0x1F9, AF_ADJUST_UP }, /* ǹ */
    +    { 0x1FA, AF_ADJUST_UP2 }, /* Ǻ */
    +    { 0x1FB, AF_ADJUST_UP2 }, /* ǻ */
    +    { 0x1FC, AF_ADJUST_UP }, /* Ǽ */
    +    { 0x1FD, AF_ADJUST_UP }, /* ǽ */
    +    { 0x1FE, AF_ADJUST_UP }, /* Ǿ */
    +    { 0x1FF, AF_ADJUST_UP }, /* ǿ */
    +
    +    { 0x200, AF_ADJUST_UP }, /* Ȁ */
    +    { 0x201, AF_ADJUST_UP }, /* ȁ */
    +    { 0x202, AF_ADJUST_UP }, /* Ȃ */
    +    { 0x203, AF_ADJUST_UP }, /* ȃ */
    +    { 0x204, AF_ADJUST_UP }, /* Ȅ */
    +    { 0x205, AF_ADJUST_UP }, /* ȅ */
    +    { 0x206, AF_ADJUST_UP }, /* Ȇ */
    +    { 0x207, AF_ADJUST_UP }, /* ȇ */
    +    { 0x208, AF_ADJUST_UP }, /* Ȉ */
    +    { 0x209, AF_ADJUST_UP }, /* ȉ */
    +    { 0x20A, AF_ADJUST_UP }, /* Ȋ */
    +    { 0x20B, AF_ADJUST_UP }, /* ȋ */
    +    { 0x20C, AF_ADJUST_UP }, /* Ȍ */
    +    { 0x20D, AF_ADJUST_UP }, /* ȍ */
    +    { 0x20E, AF_ADJUST_UP }, /* Ȏ */
    +    { 0x20F, AF_ADJUST_UP }, /* ȏ */
    +
    +    { 0x210, AF_ADJUST_UP }, /* Ȑ */
    +    { 0x211, AF_ADJUST_UP }, /* ȑ */
    +    { 0x212, AF_ADJUST_UP }, /* Ȓ */
    +    { 0x213, AF_ADJUST_UP }, /* ȓ */
    +    { 0x214, AF_ADJUST_UP }, /* Ȕ */
    +    { 0x215, AF_ADJUST_UP }, /* ȕ */
    +    { 0x216, AF_ADJUST_UP }, /* Ȗ */
    +    { 0x217, AF_ADJUST_UP }, /* ȗ */
    +    { 0x218, AF_ADJUST_DOWN }, /* Ș */
    +    { 0x219, AF_ADJUST_DOWN }, /* ș */
    +    { 0x21A, AF_ADJUST_DOWN }, /* Ț */
    +    { 0x21B, AF_ADJUST_DOWN }, /* ț */
    +    { 0x21E, AF_ADJUST_UP }, /* Ȟ */
    +    { 0x21F, AF_ADJUST_UP }, /* ȟ */
    +
    +    { 0x224, AF_IGNORE_CAPITAL_BOTTOM }, /* Ȥ */
    +    { 0x225, AF_IGNORE_SMALL_BOTTOM }, /* ȥ */
    +    { 0x226, AF_ADJUST_UP }, /* Ȧ */
    +    { 0x227, AF_ADJUST_UP }, /* ȧ */
    +    { 0x228, AF_IGNORE_CAPITAL_BOTTOM }, /* Ȩ */
    +    { 0x229, AF_IGNORE_SMALL_BOTTOM }, /* ȩ */
    +    { 0x22A, AF_ADJUST_UP2 }, /* Ȫ */
    +    { 0x22B, AF_ADJUST_UP2 }, /* ȫ */
    +    { 0x22C, AF_ADJUST_UP2 }, /* Ȭ */
    +    { 0x22D, AF_ADJUST_UP2 }, /* ȭ */
    +    { 0x22E, AF_ADJUST_UP }, /* Ȯ */
    +    { 0x22F, AF_ADJUST_UP }, /* ȯ */
    +
    +    { 0x230, AF_ADJUST_UP2 }, /* Ȱ */
    +    { 0x231, AF_ADJUST_UP2 }, /* ȱ */
    +    { 0x232, AF_ADJUST_UP }, /* Ȳ */
    +    { 0x233, AF_ADJUST_UP }, /* ȳ */
    +    { 0x23A, AF_IGNORE_CAPITAL_TOP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ⱥ */
    +    { 0x23B, AF_IGNORE_CAPITAL_TOP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ȼ */
    +    { 0x23F, AF_IGNORE_SMALL_BOTTOM }, /* ȿ */
    +
    +    { 0x240, AF_IGNORE_SMALL_BOTTOM }, /* ɀ */
    +    { 0x249, AF_ADJUST_UP }, /* ɉ */
    +
    +    /* IPA Extensions */
    +    { 0x256, AF_IGNORE_SMALL_BOTTOM }, /* ɖ */
    +
    +    { 0x260, AF_IGNORE_SMALL_TOP }, /* ɠ */
    +    { 0x267, AF_IGNORE_SMALL_BOTTOM }, /* ɧ */
    +    { 0x268, AF_ADJUST_UP }, /* ɨ */
    +
    +    { 0x272, AF_IGNORE_SMALL_BOTTOM }, /* ɲ */
    +    { 0x273, AF_IGNORE_SMALL_BOTTOM }, /* ɳ */
    +    { 0x27B, AF_IGNORE_SMALL_BOTTOM }, /* ɻ */
    +    { 0x27D, AF_IGNORE_SMALL_BOTTOM }, /* ɽ */
    +
    +    { 0x282, AF_IGNORE_SMALL_BOTTOM }, /* ʂ */
    +    { 0x288, AF_IGNORE_SMALL_BOTTOM }, /* ʈ */
    +
    +    { 0x290, AF_IGNORE_SMALL_BOTTOM }, /* ʐ */
    +    { 0x29B, AF_IGNORE_SMALL_TOP }, /* ʛ */
    +
    +    { 0x2A0, AF_IGNORE_SMALL_TOP }, /* ʠ */
    +
    +    /* Spacing Modifier Letters */
    +    { 0x2B2, AF_ADJUST_UP }, /* ʲ */
    +    { 0x2B5, AF_IGNORE_SMALL_BOTTOM }, /* ʵ */
    +
    +    /* Greek and Coptic */
    +    { 0x390, AF_ADJUST_UP2 }, /* ΐ */
    +
    +    { 0x3AA, AF_ADJUST_UP }, /* Ϊ */
    +    { 0x3AB, AF_ADJUST_UP }, /* Ϋ */
    +    { 0x3AC, AF_ADJUST_UP }, /* ά */
    +    { 0x3AD, AF_ADJUST_UP }, /* έ */
    +    { 0x3AE, AF_ADJUST_UP }, /* ή */
    +    { 0x3AF, AF_ADJUST_UP }, /* ί */
    +
    +    { 0x3B0, AF_ADJUST_UP2 }, /* ΰ */
    +
    +    { 0x3CA, AF_ADJUST_UP }, /* ϊ */
    +    { 0x3CB, AF_ADJUST_UP }, /* ϋ */
    +    { 0x3CC, AF_ADJUST_UP }, /* ό */
    +    { 0x3CD, AF_ADJUST_UP }, /* ύ */
    +    { 0x3CE, AF_ADJUST_UP }, /* ώ */
    +    { 0x3CF, AF_IGNORE_CAPITAL_BOTTOM }, /* Ϗ */
    +
    +    { 0x3D4, AF_ADJUST_UP }, /* ϔ */
    +    { 0x3D7, AF_IGNORE_SMALL_BOTTOM }, /* ϗ */
    +    { 0x3D9, AF_IGNORE_SMALL_BOTTOM }, /* ϙ */
    +
    +    { 0x3E2, AF_IGNORE_CAPITAL_BOTTOM }, /* Ϣ */
    +    { 0x3E3, AF_IGNORE_SMALL_BOTTOM }, /* ϣ */
    +
    +    { 0x3F3, AF_ADJUST_UP }, /* ϳ */
    +
    +    /* Cyrillic */
    +    { 0x400, AF_ADJUST_UP }, /* Ѐ */
    +    { 0x401, AF_ADJUST_UP }, /* Ё */
    +    { 0x403, AF_ADJUST_UP }, /* Ѓ */
    +    { 0x407, AF_ADJUST_UP }, /* Ї */
    +    { 0x40C, AF_ADJUST_UP }, /* Ќ */
    +    { 0x40D, AF_ADJUST_UP }, /* Ѝ */
    +    { 0x40E, AF_ADJUST_UP }, /* Ў */
    +    { 0x40F, AF_IGNORE_CAPITAL_BOTTOM }, /* Џ */
    +
    +    { 0x419, AF_ADJUST_UP }, /* Й */
    +
    +    { 0x426, AF_IGNORE_CAPITAL_BOTTOM }, /* Ц */
    +    { 0x429, AF_IGNORE_CAPITAL_BOTTOM }, /* Щ */
    +
    +    { 0x439, AF_ADJUST_UP }, /* й */
    +
    +    { 0x446, AF_IGNORE_SMALL_BOTTOM }, /* ц */
    +    { 0x449, AF_IGNORE_SMALL_BOTTOM }, /* щ */
    +
    +    { 0x450, AF_ADJUST_UP }, /* ѐ */
    +    { 0x451, AF_ADJUST_UP }, /* ё */
    +    { 0x453, AF_ADJUST_UP }, /* ѓ */
    +    { 0x456, AF_ADJUST_UP }, /* і */
    +    { 0x457, AF_ADJUST_UP }, /* ї */
    +    { 0x458, AF_ADJUST_UP }, /* ј */
    +    { 0x45C, AF_ADJUST_UP }, /* ќ */
    +    { 0x45D, AF_ADJUST_UP }, /* ѝ */
    +    { 0x45E, AF_ADJUST_UP }, /* ў */
    +    { 0x45F, AF_IGNORE_SMALL_BOTTOM }, /* џ */
    +
    +    { 0x476, AF_ADJUST_UP }, /* Ѷ */
    +    { 0x477, AF_ADJUST_UP }, /* ѷ */
    +    { 0x47C, AF_ADJUST_UP2 }, /* Ѽ */
    +    { 0x47D, AF_ADJUST_UP2 }, /* ѽ */
    +    { 0x47E, AF_ADJUST_UP }, /* Ѿ */
    +    { 0x47F, AF_ADJUST_UP }, /* ѿ */
    +
    +    { 0x480, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҁ */
    +    { 0x481, AF_IGNORE_SMALL_BOTTOM }, /* ҁ */
    +    { 0x48A, AF_ADJUST_UP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ҋ */
    +    { 0x48B, AF_ADJUST_UP | AF_IGNORE_SMALL_BOTTOM }, /* ҋ */
    +
    +    { 0x490, AF_IGNORE_CAPITAL_TOP }, /* Ґ */
    +    { 0x491, AF_IGNORE_SMALL_TOP }, /* ґ */
    +    { 0x496, AF_IGNORE_CAPITAL_BOTTOM }, /* Җ */
    +    { 0x497, AF_IGNORE_SMALL_BOTTOM }, /* җ */
    +    { 0x498, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҙ */
    +    { 0x499, AF_IGNORE_SMALL_BOTTOM }, /* ҙ */
    +    { 0x49A, AF_IGNORE_CAPITAL_BOTTOM }, /* Қ */
    +    { 0x49B, AF_IGNORE_SMALL_BOTTOM }, /* қ */
    +
    +    { 0x4A2, AF_IGNORE_CAPITAL_BOTTOM }, /* Ң */
    +    { 0x4A3, AF_IGNORE_SMALL_BOTTOM }, /* ң */
    +    { 0x4AA, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҫ */
    +    { 0x4AB, AF_IGNORE_SMALL_BOTTOM }, /* ҫ */
    +    { 0x4AC, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҭ */
    +    { 0x4AD, AF_IGNORE_SMALL_BOTTOM }, /* ҭ */
    +
    +    { 0x4B2, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҳ */
    +    { 0x4B3, AF_IGNORE_SMALL_BOTTOM }, /* ҳ */
    +    { 0x4B4, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҵ */
    +    { 0x4B5, AF_IGNORE_SMALL_BOTTOM }, /* ҵ */
    +    { 0x4B6, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҷ */
    +    { 0x4B7, AF_IGNORE_SMALL_BOTTOM }, /* ҷ */
    +    { 0x4BE, AF_IGNORE_CAPITAL_BOTTOM }, /* Ҿ */
    +    { 0x4BF, AF_IGNORE_SMALL_BOTTOM }, /* ҿ */
    +
    +    { 0x4C1, AF_ADJUST_UP }, /* Ӂ */
    +    { 0x4C2, AF_ADJUST_UP }, /* ӂ */
    +    { 0x4C5, AF_IGNORE_CAPITAL_BOTTOM }, /* Ӆ */
    +    { 0x4C6, AF_IGNORE_SMALL_BOTTOM }, /* ӆ */
    +    { 0x4C9, AF_IGNORE_CAPITAL_BOTTOM }, /* Ӊ */
    +    { 0x4CA, AF_IGNORE_SMALL_BOTTOM }, /* ӊ */
    +    { 0x4CB, AF_IGNORE_CAPITAL_BOTTOM }, /* Ӌ */
    +    { 0x4CC, AF_IGNORE_SMALL_BOTTOM }, /* ӌ */
    +    { 0x4CD, AF_IGNORE_CAPITAL_BOTTOM }, /* Ӎ */
    +    { 0x4CE, AF_IGNORE_SMALL_BOTTOM }, /* ӎ */
    +
    +    { 0x4D0, AF_ADJUST_UP }, /* Ӑ */
    +    { 0x4D1, AF_ADJUST_UP }, /* ӑ */
    +    { 0x4D2, AF_ADJUST_UP }, /* Ӓ */
    +    { 0x4D3, AF_ADJUST_UP }, /* ӓ */
    +    { 0x4D6, AF_ADJUST_UP }, /* Ӗ */
    +    { 0x4D7, AF_ADJUST_UP }, /* ӗ */
    +    { 0x4DA, AF_ADJUST_UP }, /* Ӛ */
    +    { 0x4DB, AF_ADJUST_UP }, /* ӛ */
    +    { 0x4DC, AF_ADJUST_UP }, /* Ӝ */
    +    { 0x4DD, AF_ADJUST_UP }, /* ӝ */
    +    { 0x4DE, AF_ADJUST_UP }, /* Ӟ */
    +    { 0x4DF, AF_ADJUST_UP }, /* ӟ */
    +
    +    { 0x4E2, AF_ADJUST_UP }, /* Ӣ */
    +    { 0x4E3, AF_ADJUST_UP }, /* ӣ */
    +    { 0x4E4, AF_ADJUST_UP }, /* Ӥ */
    +    { 0x4E5, AF_ADJUST_UP }, /* ӥ */
    +    { 0x4E6, AF_ADJUST_UP }, /* Ӧ */
    +    { 0x4E7, AF_ADJUST_UP }, /* ӧ */
    +    { 0x4EA, AF_ADJUST_UP }, /* Ӫ */
    +    { 0x4EB, AF_ADJUST_UP }, /* ӫ */
    +    { 0x4EC, AF_ADJUST_UP }, /* Ӭ */
    +    { 0x4ED, AF_ADJUST_UP }, /* ӭ */
    +    { 0x4EE, AF_ADJUST_UP }, /* Ӯ */
    +    { 0x4EF, AF_ADJUST_UP }, /* ӯ */
    +
    +    { 0x4F0, AF_ADJUST_UP }, /* Ӱ */
    +    { 0x4F1, AF_ADJUST_UP }, /* ӱ */
    +    { 0x4F2, AF_ADJUST_UP }, /* Ӳ */
    +    { 0x4F3, AF_ADJUST_UP }, /* ӳ */
    +    { 0x4F4, AF_ADJUST_UP }, /* Ӵ */
    +    { 0x4F5, AF_ADJUST_UP }, /* ӵ */
    +    { 0x4F6, AF_IGNORE_CAPITAL_BOTTOM }, /* Ӷ */
    +    { 0x4F7, AF_IGNORE_SMALL_BOTTOM }, /* ӷ */
    +    { 0x4F8, AF_ADJUST_UP }, /* Ӹ */
    +    { 0x4F9, AF_ADJUST_UP }, /* ӹ */
    +    { 0x4FA, AF_IGNORE_CAPITAL_BOTTOM }, /* Ӻ */
    +    { 0x4FB, AF_IGNORE_SMALL_BOTTOM }, /* ӻ */
    +
    +    /* Cyrillic Supplement */
    +    { 0x506, AF_IGNORE_CAPITAL_BOTTOM }, /* Ԇ */
    +    { 0x507, AF_IGNORE_SMALL_BOTTOM }, /* ԇ */
    +
    +    { 0x524, AF_IGNORE_CAPITAL_BOTTOM }, /* Ԥ */
    +    { 0x525, AF_IGNORE_SMALL_BOTTOM }, /* ԥ */
    +    { 0x526, AF_IGNORE_CAPITAL_BOTTOM }, /* Ԧ */
    +    { 0x527, AF_IGNORE_SMALL_BOTTOM }, /* ԧ */
    +    { 0x52E, AF_IGNORE_CAPITAL_BOTTOM }, /* Ԯ */
    +    { 0x52F, AF_IGNORE_SMALL_BOTTOM }, /* ԯ */
    +
    +    /* Cherokee */
    +    { 0x13A5, AF_ADJUST_UP }, /* Ꭵ */
    +
    +    /* Phonetic Extensions */
    +    { 0x1D09, AF_ADJUST_DOWN }, /* ᴉ */
    +
    +    { 0x1D4E, AF_ADJUST_DOWN }, /* ᵎ */
    +
    +    { 0x1D51, AF_IGNORE_SMALL_BOTTOM }, /* ᵑ */
    +
    +    { 0x1D62, AF_ADJUST_UP }, /* ᵢ */
    +
    +    /* Phonetic Extensions Supplement */
    +    { 0x1D80, AF_IGNORE_SMALL_BOTTOM }, /* ᶀ */
    +    { 0x1D81, AF_IGNORE_SMALL_BOTTOM }, /* ᶁ */
    +    { 0x1D82, AF_IGNORE_SMALL_BOTTOM }, /* ᶂ */
    +    { 0x1D84, AF_IGNORE_SMALL_BOTTOM }, /* ᶄ */
    +    { 0x1D85, AF_IGNORE_SMALL_BOTTOM }, /* ᶅ */
    +    { 0x1D86, AF_IGNORE_SMALL_BOTTOM }, /* ᶆ */
    +    { 0x1D87, AF_IGNORE_SMALL_BOTTOM }, /* ᶇ */
    +    { 0x1D89, AF_IGNORE_SMALL_BOTTOM }, /* ᶉ */
    +    { 0x1D8A, AF_IGNORE_SMALL_BOTTOM }, /* ᶊ */
    +    { 0x1D8C, AF_IGNORE_SMALL_BOTTOM }, /* ᶌ */
    +    { 0x1D8D, AF_IGNORE_SMALL_BOTTOM }, /* ᶍ */
    +    { 0x1D8E, AF_IGNORE_SMALL_BOTTOM }, /* ᶎ */
    +    { 0x1D8F, AF_IGNORE_SMALL_BOTTOM }, /* ᶏ */
    +
    +    { 0x1D90, AF_IGNORE_SMALL_BOTTOM }, /* ᶐ */
    +    { 0x1D91, AF_IGNORE_SMALL_BOTTOM }, /* ᶑ */
    +    { 0x1D92, AF_IGNORE_SMALL_BOTTOM }, /* ᶒ */
    +    { 0x1D93, AF_IGNORE_SMALL_BOTTOM }, /* ᶓ */
    +    { 0x1D94, AF_IGNORE_SMALL_BOTTOM }, /* ᶔ */
    +    { 0x1D95, AF_IGNORE_SMALL_BOTTOM }, /* ᶕ */
    +    { 0x1D96, AF_ADJUST_UP | AF_IGNORE_SMALL_BOTTOM }, /* ᶖ */
    +    { 0x1D97, AF_IGNORE_SMALL_BOTTOM }, /* ᶗ */
    +    { 0x1D98, AF_IGNORE_SMALL_BOTTOM }, /* ᶘ */
    +    { 0x1D99, AF_IGNORE_SMALL_BOTTOM }, /* ᶙ */
    +    { 0x1D9A, AF_IGNORE_SMALL_BOTTOM }, /* ᶚ */
    +
    +    { 0x1DA4, AF_ADJUST_UP }, /* ᶤ */
    +    { 0x1DA8, AF_ADJUST_UP }, /* ᶨ */
    +    { 0x1DA9, AF_IGNORE_SMALL_BOTTOM }, /* ᶩ */
    +    { 0x1DAA, AF_IGNORE_SMALL_BOTTOM }, /* ᶪ */
    +    { 0x1DAC, AF_IGNORE_SMALL_BOTTOM }, /* ᶬ */
    +    { 0x1DAE, AF_IGNORE_SMALL_BOTTOM }, /* ᶮ */
    +    { 0x1DAF, AF_IGNORE_SMALL_BOTTOM }, /* ᶯ */
    +
    +    { 0x1DB3, AF_IGNORE_SMALL_BOTTOM }, /* ᶳ */
    +    { 0x1DB5, AF_IGNORE_SMALL_BOTTOM }, /* ᶵ */
    +    { 0x1DBC, AF_IGNORE_SMALL_BOTTOM }, /* ᶼ */
    +
    +    /* Latin Extended Additional */
    +    { 0x1E00, AF_ADJUST_DOWN }, /* Ḁ */
    +    { 0x1E01, AF_ADJUST_DOWN }, /* ḁ */
    +    { 0x1E02, AF_ADJUST_UP }, /* Ḃ */
    +    { 0x1E03, AF_ADJUST_UP }, /* ḃ */
    +    { 0x1E04, AF_ADJUST_DOWN }, /* Ḅ */
    +    { 0x1E05, AF_ADJUST_DOWN }, /* ḅ */
    +    { 0x1E06, AF_ADJUST_DOWN }, /* Ḇ */
    +    { 0x1E07, AF_ADJUST_DOWN }, /* ḇ */
    +    { 0x1E08, AF_ADJUST_UP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ḉ */
    +    { 0x1E09, AF_ADJUST_UP | AF_IGNORE_SMALL_BOTTOM }, /* ḉ */
    +    { 0x1E0A, AF_ADJUST_UP }, /* Ḋ */
    +    { 0x1E0B, AF_ADJUST_UP }, /* ḋ */
    +    { 0x1E0C, AF_ADJUST_DOWN }, /* Ḍ */
    +    { 0x1E0D, AF_ADJUST_DOWN }, /* ḍ */
    +    { 0x1E0E, AF_ADJUST_DOWN }, /* Ḏ */
    +    { 0x1E0F, AF_ADJUST_DOWN }, /* ḏ */
    +
    +    { 0x1E10, AF_ADJUST_DOWN }, /* Ḑ */
    +    { 0x1E11, AF_ADJUST_DOWN }, /* ḑ */
    +    { 0x1E12, AF_ADJUST_DOWN }, /* Ḓ */
    +    { 0x1E13, AF_ADJUST_DOWN }, /* ḓ */
    +    { 0x1E14, AF_ADJUST_UP2 }, /* Ḕ */
    +    { 0x1E15, AF_ADJUST_UP2 }, /* ḕ */
    +    { 0x1E16, AF_ADJUST_UP2 }, /* Ḗ */
    +    { 0x1E17, AF_ADJUST_UP2 }, /* ḗ */
    +    { 0x1E18, AF_ADJUST_DOWN }, /* Ḙ */
    +    { 0x1E19, AF_ADJUST_DOWN }, /* ḙ */
    +    { 0x1E1A, AF_ADJUST_DOWN | AF_ADJUST_TILDE_BOTTOM }, /* Ḛ */
    +    { 0x1E1B, AF_ADJUST_DOWN | AF_ADJUST_TILDE_BOTTOM }, /* ḛ */
    +    { 0x1E1C, AF_ADJUST_UP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ḝ */
    +    { 0x1E1D, AF_ADJUST_UP | AF_IGNORE_SMALL_BOTTOM }, /* ḝ */
    +    { 0x1E1E, AF_ADJUST_UP }, /* Ḟ */
    +    { 0x1E1F, AF_ADJUST_UP }, /* ḟ */
    +
    +    { 0x1E20, AF_ADJUST_UP }, /* Ḡ */
    +    { 0x1E21, AF_ADJUST_UP }, /* ḡ */
    +    { 0x1E22, AF_ADJUST_UP }, /* Ḣ */
    +    { 0x1E23, AF_ADJUST_UP }, /* ḣ */
    +    { 0x1E24, AF_ADJUST_DOWN }, /* Ḥ */
    +    { 0x1E25, AF_ADJUST_DOWN }, /* ḥ */
    +    { 0x1E26, AF_ADJUST_UP }, /* Ḧ */
    +    { 0x1E27, AF_ADJUST_UP }, /* ḧ */
    +    { 0x1E28, AF_IGNORE_CAPITAL_BOTTOM }, /* Ḩ */
    +    { 0x1E29, AF_IGNORE_SMALL_BOTTOM }, /* ḩ */
    +    { 0x1E2A, AF_ADJUST_DOWN }, /* Ḫ */
    +    { 0x1E2B, AF_ADJUST_DOWN }, /* ḫ */
    +    { 0x1E2C, AF_ADJUST_DOWN | AF_ADJUST_TILDE_BOTTOM }, /* Ḭ */
    +    { 0x1E2D, AF_ADJUST_UP | AF_ADJUST_DOWN | AF_ADJUST_TILDE_BOTTOM }, /* ḭ */
    +    { 0x1E2E, AF_ADJUST_UP2 }, /* Ḯ */
    +    { 0x1E2F, AF_ADJUST_UP2 }, /* ḯ */
    +
    +    { 0x1E30, AF_ADJUST_UP }, /* Ḱ */
    +    { 0x1E31, AF_ADJUST_UP }, /* ḱ */
    +    { 0x1E32, AF_ADJUST_DOWN }, /* Ḳ */
    +    { 0x1E33, AF_ADJUST_DOWN }, /* ḳ */
    +    { 0x1E34, AF_ADJUST_DOWN }, /* Ḵ */
    +    { 0x1E35, AF_ADJUST_DOWN }, /* ḵ */
    +    { 0x1E36, AF_ADJUST_DOWN }, /* Ḷ */
    +    { 0x1E37, AF_ADJUST_DOWN }, /* ḷ */
    +    { 0x1E38, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ḹ */
    +    { 0x1E39, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ḹ */
    +    { 0x1E3A, AF_ADJUST_DOWN }, /* Ḻ */
    +    { 0x1E3B, AF_ADJUST_DOWN }, /* ḻ */
    +    { 0x1E3C, AF_ADJUST_DOWN }, /* Ḽ */
    +    { 0x1E3D, AF_ADJUST_DOWN }, /* ḽ */
    +    { 0x1E3E, AF_ADJUST_UP }, /* Ḿ */
    +    { 0x1E3F, AF_ADJUST_UP }, /* ḿ */
    +
    +    { 0x1E40, AF_ADJUST_UP }, /* Ṁ */
    +    { 0x1E41, AF_ADJUST_UP }, /* ṁ */
    +    { 0x1E42, AF_ADJUST_DOWN }, /* Ṃ */
    +    { 0x1E43, AF_ADJUST_DOWN }, /* ṃ */
    +    { 0x1E44, AF_ADJUST_UP }, /* Ṅ */
    +    { 0x1E45, AF_ADJUST_UP }, /* ṅ */
    +    { 0x1E46, AF_ADJUST_DOWN }, /* Ṇ */
    +    { 0x1E47, AF_ADJUST_DOWN }, /* ṇ */
    +    { 0x1E48, AF_ADJUST_DOWN }, /* Ṉ */
    +    { 0x1E49, AF_ADJUST_DOWN }, /* ṉ */
    +    { 0x1E4A, AF_ADJUST_DOWN }, /* Ṋ */
    +    { 0x1E4B, AF_ADJUST_DOWN }, /* ṋ */
    +    { 0x1E4C, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP2 }, /* Ṍ */
    +    { 0x1E4D, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP2 }, /* ṍ */
    +    { 0x1E4E, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP2 }, /* Ṏ */
    +    { 0x1E4F, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP2 }, /* ṏ */
    +
    +    { 0x1E50, AF_ADJUST_UP2 }, /* Ṑ */
    +    { 0x1E51, AF_ADJUST_UP2 }, /* ṑ */
    +    { 0x1E52, AF_ADJUST_UP2 }, /* Ṓ */
    +    { 0x1E53, AF_ADJUST_UP2 }, /* ṓ */
    +    { 0x1E54, AF_ADJUST_UP }, /* Ṕ */
    +    { 0x1E55, AF_ADJUST_UP }, /* ṕ */
    +    { 0x1E56, AF_ADJUST_UP }, /* Ṗ */
    +    { 0x1E57, AF_ADJUST_UP }, /* ṗ */
    +    { 0x1E58, AF_ADJUST_UP }, /* Ṙ */
    +    { 0x1E59, AF_ADJUST_UP }, /* ṙ */
    +    { 0x1E5A, AF_ADJUST_DOWN }, /* Ṛ */
    +    { 0x1E5B, AF_ADJUST_DOWN }, /* ṛ */
    +    { 0x1E5C, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ṝ */
    +    { 0x1E5D, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ṝ */
    +    { 0x1E5E, AF_ADJUST_DOWN }, /* Ṟ */
    +    { 0x1E5F, AF_ADJUST_DOWN }, /* ṟ */
    +
    +    { 0x1E60, AF_ADJUST_UP }, /* Ṡ */
    +    { 0x1E61, AF_ADJUST_UP }, /* ṡ */
    +    { 0x1E62, AF_ADJUST_DOWN }, /* Ṣ */
    +    { 0x1E63, AF_ADJUST_DOWN }, /* ṣ */
    +    { 0x1E64, AF_ADJUST_UP }, /* Ṥ */
    +    { 0x1E65, AF_ADJUST_UP }, /* ṥ */
    +    { 0x1E66, AF_ADJUST_UP }, /* Ṧ */
    +    { 0x1E67, AF_ADJUST_UP }, /* ṧ */
    +    { 0x1E68, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ṩ */
    +    { 0x1E69, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ṩ */
    +    { 0x1E6A, AF_ADJUST_UP }, /* Ṫ */
    +    { 0x1E6B, AF_ADJUST_UP }, /* ṫ */
    +    { 0x1E6C, AF_ADJUST_DOWN }, /* Ṭ */
    +    { 0x1E6D, AF_ADJUST_DOWN }, /* ṭ */
    +    { 0x1E6E, AF_ADJUST_DOWN }, /* Ṯ */
    +    { 0x1E6F, AF_ADJUST_DOWN }, /* ṯ */
    +
    +    { 0x1E70, AF_ADJUST_DOWN }, /* Ṱ */
    +    { 0x1E71, AF_ADJUST_DOWN }, /* ṱ */
    +    { 0x1E72, AF_ADJUST_DOWN }, /* Ṳ */
    +    { 0x1E73, AF_ADJUST_DOWN }, /* ṳ */
    +    { 0x1E74, AF_ADJUST_DOWN | AF_ADJUST_TILDE_BOTTOM }, /* Ṵ */
    +    { 0x1E75, AF_ADJUST_DOWN | AF_ADJUST_TILDE_BOTTOM }, /* ṵ */
    +    { 0x1E76, AF_ADJUST_DOWN }, /* Ṷ */
    +    { 0x1E77, AF_ADJUST_DOWN }, /* ṷ */
    +    { 0x1E78, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP2 }, /* Ṹ */
    +    { 0x1E79, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP2 }, /* ṹ */
    +    { 0x1E7A, AF_ADJUST_UP2 }, /* Ṻ */
    +    { 0x1E7B, AF_ADJUST_UP2 }, /* ṻ */
    +    { 0x1E7C, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ṽ */
    +    { 0x1E7D, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ṽ */
    +    { 0x1E7E, AF_ADJUST_DOWN }, /* Ṿ */
    +    { 0x1E7F, AF_ADJUST_DOWN }, /* ṿ */
    +
    +    { 0x1E80, AF_ADJUST_UP }, /* Ẁ */
    +    { 0x1E81, AF_ADJUST_UP }, /* ẁ */
    +    { 0x1E82, AF_ADJUST_UP }, /* Ẃ */
    +    { 0x1E83, AF_ADJUST_UP }, /* ẃ */
    +    { 0x1E84, AF_ADJUST_UP }, /* Ẅ */
    +    { 0x1E85, AF_ADJUST_UP }, /* ẅ */
    +    { 0x1E86, AF_ADJUST_UP }, /* Ẇ */
    +    { 0x1E87, AF_ADJUST_UP }, /* ẇ */
    +    { 0x1E88, AF_ADJUST_DOWN }, /* Ẉ */
    +    { 0x1E89, AF_ADJUST_DOWN }, /* ẉ */
    +    { 0x1E8A, AF_ADJUST_UP }, /* Ẋ */
    +    { 0x1E8B, AF_ADJUST_UP }, /* ẋ */
    +    { 0x1E8C, AF_ADJUST_UP }, /* Ẍ */
    +    { 0x1E8D, AF_ADJUST_UP }, /* ẍ */
    +    { 0x1E8E, AF_ADJUST_UP }, /* Ẏ */
    +    { 0x1E8F, AF_ADJUST_UP }, /* ẏ */
    +
    +    { 0x1E90, AF_ADJUST_UP }, /* Ẑ */
    +    { 0x1E91, AF_ADJUST_UP }, /* ẑ */
    +    { 0x1E92, AF_ADJUST_DOWN }, /* Ẓ */
    +    { 0x1E93, AF_ADJUST_DOWN }, /* ẓ */
    +    { 0x1E94, AF_ADJUST_DOWN }, /* Ẕ */
    +    { 0x1E95, AF_ADJUST_DOWN }, /* ẕ */
    +    { 0x1E96, AF_ADJUST_DOWN }, /* ẖ */
    +    { 0x1E97, AF_ADJUST_UP }, /* ẗ */
    +    { 0x1E98, AF_ADJUST_UP }, /* ẘ */
    +    { 0x1E99, AF_ADJUST_UP }, /* ẙ */
    +    { 0x1E9A, AF_ADJUST_UP }, /* ẚ */
    +    { 0x1E9B, AF_ADJUST_UP }, /* ẛ */
    +
    +    { 0x1EA0, AF_ADJUST_DOWN }, /* Ạ */
    +    { 0x1EA1, AF_ADJUST_DOWN }, /* ạ */
    +    { 0x1EA2, AF_ADJUST_UP }, /* Ả */
    +    { 0x1EA3, AF_ADJUST_UP }, /* ả */
    +    { 0x1EA4, AF_ADJUST_UP2 }, /* Ấ */
    +    { 0x1EA5, AF_ADJUST_UP2 }, /* ấ */
    +    { 0x1EA6, AF_ADJUST_UP2 }, /* Ầ */
    +    { 0x1EA7, AF_ADJUST_UP2 }, /* ầ */
    +    { 0x1EA8, AF_ADJUST_UP2 }, /* Ẩ */
    +    { 0x1EA9, AF_ADJUST_UP2 }, /* ẩ */
    +    { 0x1EAA, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* Ẫ */
    +    { 0x1EAB, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ẫ */
    +    { 0x1EAC, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ậ */
    +    { 0x1EAD, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ậ */
    +    { 0x1EAE, AF_ADJUST_UP2 }, /* Ắ */
    +    { 0x1EAF, AF_ADJUST_UP2 }, /* ắ */
    +
    +    { 0x1EB0, AF_ADJUST_UP2 }, /* Ằ */
    +    { 0x1EB1, AF_ADJUST_UP2 }, /* ằ */
    +    { 0x1EB2, AF_ADJUST_UP2 }, /* Ẳ */
    +    { 0x1EB3, AF_ADJUST_UP2 }, /* ẳ */
    +    { 0x1EB4, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* Ẵ */
    +    { 0x1EB5, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ẵ */
    +    { 0x1EB6, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ặ */
    +    { 0x1EB7, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ặ */
    +    { 0x1EB8, AF_ADJUST_DOWN }, /* Ẹ */
    +    { 0x1EB9, AF_ADJUST_DOWN }, /* ẹ */
    +    { 0x1EBA, AF_ADJUST_UP }, /* Ẻ */
    +    { 0x1EBB, AF_ADJUST_UP }, /* ẻ */
    +    { 0x1EBC, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ẽ */
    +    { 0x1EBD, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ẽ */
    +    { 0x1EBE, AF_ADJUST_UP2 }, /* Ế */
    +    { 0x1EBF, AF_ADJUST_UP2 }, /* ế */
    +
    +    { 0x1EC0, AF_ADJUST_UP2 }, /* Ề */
    +    { 0x1EC1, AF_ADJUST_UP2 }, /* ề */
    +    { 0x1EC2, AF_ADJUST_UP2 }, /* Ể */
    +    { 0x1EC3, AF_ADJUST_UP2 }, /* ể */
    +    { 0x1EC4, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* Ễ */
    +    { 0x1EC5, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ễ */
    +    { 0x1EC6, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ệ */
    +    { 0x1EC7, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ệ */
    +    { 0x1EC8, AF_ADJUST_UP }, /* Ỉ */
    +    { 0x1EC9, AF_ADJUST_UP }, /* ỉ */
    +    { 0x1ECA, AF_ADJUST_DOWN }, /* Ị */
    +    { 0x1ECB, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ị */
    +    { 0x1ECC, AF_ADJUST_DOWN }, /* Ọ */
    +    { 0x1ECD, AF_ADJUST_DOWN }, /* ọ */
    +    { 0x1ECE, AF_ADJUST_UP }, /* Ỏ */
    +    { 0x1ECF, AF_ADJUST_UP }, /* ỏ */
    +
    +    { 0x1ED0, AF_ADJUST_UP2 }, /* Ố */
    +    { 0x1ED1, AF_ADJUST_UP2 }, /* ố */
    +    { 0x1ED2, AF_ADJUST_UP2 }, /* Ồ */
    +    { 0x1ED3, AF_ADJUST_UP2 }, /* ồ */
    +    { 0x1ED4, AF_ADJUST_UP2 }, /* Ổ */
    +    { 0x1ED5, AF_ADJUST_UP2 }, /* ổ */
    +    { 0x1ED6, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* Ỗ */
    +    { 0x1ED7, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ỗ */
    +    { 0x1ED8, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* Ộ */
    +    { 0x1ED9, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ộ */
    +    { 0x1EDA, AF_ADJUST_UP | AF_IGNORE_CAPITAL_TOP }, /* Ớ */
    +    { 0x1EDB, AF_ADJUST_UP | AF_IGNORE_SMALL_TOP }, /* ớ */
    +    { 0x1EDC, AF_ADJUST_UP | AF_IGNORE_CAPITAL_TOP }, /* Ờ */
    +    { 0x1EDD, AF_ADJUST_UP | AF_IGNORE_SMALL_TOP }, /* ờ */
    +    { 0x1EDE, AF_ADJUST_UP | AF_IGNORE_CAPITAL_TOP }, /* Ở */
    +    { 0x1EDF, AF_ADJUST_UP | AF_IGNORE_SMALL_TOP }, /* ở */
    +
    +    { 0x1EE0, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_IGNORE_CAPITAL_TOP }, /* Ỡ */
    +    { 0x1EE1, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_IGNORE_SMALL_TOP }, /* ỡ */
    +    { 0x1EE2, AF_ADJUST_DOWN | AF_IGNORE_CAPITAL_TOP }, /* Ợ */
    +    { 0x1EE3, AF_ADJUST_DOWN | AF_IGNORE_SMALL_TOP }, /* ợ */
    +    { 0x1EE4, AF_ADJUST_DOWN }, /* Ụ */
    +    { 0x1EE5, AF_ADJUST_DOWN }, /* ụ */
    +    { 0x1EE6, AF_ADJUST_UP }, /* Ủ */
    +    { 0x1EE7, AF_ADJUST_UP }, /* ủ */
    +    { 0x1EE8, AF_ADJUST_UP | AF_IGNORE_CAPITAL_TOP }, /* Ứ */
    +    { 0x1EE9, AF_ADJUST_UP | AF_IGNORE_SMALL_TOP }, /* ứ */
    +    { 0x1EEA, AF_ADJUST_UP | AF_IGNORE_CAPITAL_TOP }, /* Ừ */
    +    { 0x1EEB, AF_ADJUST_UP | AF_IGNORE_SMALL_TOP }, /* ừ */
    +    { 0x1EEC, AF_ADJUST_UP | AF_IGNORE_CAPITAL_TOP }, /* Ử */
    +    { 0x1EED, AF_ADJUST_UP | AF_IGNORE_SMALL_TOP }, /* ử */
    +    { 0x1EEE, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_IGNORE_CAPITAL_TOP }, /* Ữ */
    +    { 0x1EEF, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_IGNORE_SMALL_TOP }, /* ữ */
    +
    +    { 0x1EF0, AF_ADJUST_DOWN | AF_IGNORE_CAPITAL_TOP }, /* Ự */
    +    { 0x1EF1, AF_ADJUST_DOWN | AF_IGNORE_SMALL_TOP }, /* ự */
    +    { 0x1EF2, AF_ADJUST_UP }, /* Ỳ */
    +    { 0x1EF3, AF_ADJUST_UP }, /* ỳ */
    +    { 0x1EF4, AF_ADJUST_DOWN }, /* Ỵ */
    +    { 0x1EF5, AF_ADJUST_DOWN }, /* ỵ */
    +    { 0x1EF6, AF_ADJUST_UP }, /* Ỷ */
    +    { 0x1EF7, AF_ADJUST_UP }, /* ỷ */
    +    { 0x1EF8, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* Ỹ */
    +    { 0x1EF9, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ỹ */
    +
    +    /* Greek Extended */
    +    { 0x1F00, AF_ADJUST_UP }, /* ἀ */
    +    { 0x1F01, AF_ADJUST_UP }, /* ἁ */
    +    { 0x1F02, AF_ADJUST_UP }, /* ἂ */
    +    { 0x1F03, AF_ADJUST_UP }, /* ἃ */
    +    { 0x1F04, AF_ADJUST_UP }, /* ἄ */
    +    { 0x1F05, AF_ADJUST_UP }, /* ἅ */
    +    { 0x1F06, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ἆ */
    +    { 0x1F07, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ἇ */
    +
    +    { 0x1F10, AF_ADJUST_UP }, /* ἐ */
    +    { 0x1F11, AF_ADJUST_UP }, /* ἑ */
    +    { 0x1F12, AF_ADJUST_UP }, /* ἒ */
    +    { 0x1F13, AF_ADJUST_UP }, /* ἓ */
    +    { 0x1F14, AF_ADJUST_UP }, /* ἔ */
    +    { 0x1F15, AF_ADJUST_UP }, /* ἕ */
    +
    +    { 0x1F20, AF_ADJUST_UP }, /* ἠ */
    +    { 0x1F21, AF_ADJUST_UP }, /* ἡ */
    +    { 0x1F22, AF_ADJUST_UP }, /* ἢ */
    +    { 0x1F23, AF_ADJUST_UP }, /* ἣ */
    +    { 0x1F24, AF_ADJUST_UP }, /* ἤ */
    +    { 0x1F25, AF_ADJUST_UP }, /* ἥ */
    +    { 0x1F26, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ἦ */
    +    { 0x1F27, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ἧ */
    +
    +    { 0x1F30, AF_ADJUST_UP }, /* ἰ */
    +    { 0x1F31, AF_ADJUST_UP }, /* ἱ */
    +    { 0x1F32, AF_ADJUST_UP }, /* ἲ */
    +    { 0x1F33, AF_ADJUST_UP }, /* ἳ */
    +    { 0x1F34, AF_ADJUST_UP }, /* ἴ */
    +    { 0x1F35, AF_ADJUST_UP }, /* ἵ */
    +    { 0x1F36, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ἶ */
    +    { 0x1F37, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ἷ */
    +
    +    { 0x1F40, AF_ADJUST_UP }, /* ὀ */
    +    { 0x1F41, AF_ADJUST_UP }, /* ὁ */
    +    { 0x1F42, AF_ADJUST_UP }, /* ὂ */
    +    { 0x1F43, AF_ADJUST_UP }, /* ὃ */
    +    { 0x1F44, AF_ADJUST_UP }, /* ὄ */
    +    { 0x1F45, AF_ADJUST_UP }, /* ὅ */
    +
    +    { 0x1F50, AF_ADJUST_UP }, /* ὐ */
    +    { 0x1F51, AF_ADJUST_UP }, /* ὑ */
    +    { 0x1F52, AF_ADJUST_UP }, /* ὒ */
    +    { 0x1F53, AF_ADJUST_UP }, /* ὓ */
    +    { 0x1F54, AF_ADJUST_UP }, /* ὔ */
    +    { 0x1F55, AF_ADJUST_UP }, /* ὕ */
    +    { 0x1F56, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ὖ */
    +    { 0x1F57, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ὗ */
    +
    +    { 0x1F60, AF_ADJUST_UP }, /* ὠ */
    +    { 0x1F61, AF_ADJUST_UP }, /* ὡ */
    +    { 0x1F62, AF_ADJUST_UP }, /* ὢ */
    +    { 0x1F63, AF_ADJUST_UP }, /* ὣ */
    +    { 0x1F64, AF_ADJUST_UP }, /* ὤ */
    +    { 0x1F65, AF_ADJUST_UP }, /* ὥ */
    +    { 0x1F66, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ὦ */
    +    { 0x1F67, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ὧ */
    +
    +    { 0x1F70, AF_ADJUST_UP }, /* ὰ */
    +    { 0x1F71, AF_ADJUST_UP }, /* ά */
    +    { 0x1F72, AF_ADJUST_UP }, /* ὲ */
    +    { 0x1F73, AF_ADJUST_UP }, /* έ */
    +    { 0x1F74, AF_ADJUST_UP }, /* ὴ */
    +    { 0x1F75, AF_ADJUST_UP }, /* ή */
    +    { 0x1F76, AF_ADJUST_UP }, /* ὶ */
    +    { 0x1F77, AF_ADJUST_UP }, /* ί */
    +    { 0x1F78, AF_ADJUST_UP }, /* ὸ */
    +    { 0x1F79, AF_ADJUST_UP }, /* ό */
    +    { 0x1F7A, AF_ADJUST_UP }, /* ὺ */
    +    { 0x1F7B, AF_ADJUST_UP }, /* ύ */
    +    { 0x1F7C, AF_ADJUST_UP }, /* ὼ */
    +    { 0x1F7D, AF_ADJUST_UP }, /* ώ */
    +
    +    { 0x1F80, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾀ */
    +    { 0x1F81, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾁ */
    +    { 0x1F82, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾂ */
    +    { 0x1F83, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾃ */
    +    { 0x1F84, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾄ */
    +    { 0x1F85, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾅ */
    +    { 0x1F86, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾆ */
    +    { 0x1F87, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾇ */
    +    { 0x1F88, AF_ADJUST_DOWN }, /* ᾈ */
    +    { 0x1F89, AF_ADJUST_DOWN }, /* ᾉ */
    +    { 0x1F8A, AF_ADJUST_DOWN }, /* ᾊ */
    +    { 0x1F8B, AF_ADJUST_DOWN }, /* ᾋ */
    +    { 0x1F8C, AF_ADJUST_DOWN }, /* ᾌ */
    +    { 0x1F8D, AF_ADJUST_DOWN }, /* ᾍ */
    +    { 0x1F8E, AF_ADJUST_DOWN }, /* ᾎ */
    +    { 0x1F8F, AF_ADJUST_DOWN }, /* ᾏ */
    +
    +    { 0x1F90, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾐ */
    +    { 0x1F91, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾑ */
    +    { 0x1F92, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾒ */
    +    { 0x1F93, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾓ */
    +    { 0x1F94, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾔ */
    +    { 0x1F95, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾕ */
    +    { 0x1F96, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾖ */
    +    { 0x1F97, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾗ */
    +    { 0x1F98, AF_ADJUST_DOWN }, /* ᾘ */
    +    { 0x1F99, AF_ADJUST_DOWN }, /* ᾙ */
    +    { 0x1F9A, AF_ADJUST_DOWN }, /* ᾚ */
    +    { 0x1F9B, AF_ADJUST_DOWN }, /* ᾛ */
    +    { 0x1F9C, AF_ADJUST_DOWN }, /* ᾜ */
    +    { 0x1F9D, AF_ADJUST_DOWN }, /* ᾝ */
    +    { 0x1F9E, AF_ADJUST_DOWN }, /* ᾞ */
    +    { 0x1F9F, AF_ADJUST_DOWN }, /* ᾟ */
    +
    +    { 0x1FA0, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾠ */
    +    { 0x1FA1, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾡ */
    +    { 0x1FA2, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾢ */
    +    { 0x1FA3, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾣ */
    +    { 0x1FA4, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾤ */
    +    { 0x1FA5, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾥ */
    +    { 0x1FA6, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾦ */
    +    { 0x1FA7, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾧ */
    +    { 0x1FA8, AF_ADJUST_DOWN }, /* ᾨ */
    +    { 0x1FA9, AF_ADJUST_DOWN }, /* ᾩ */
    +    { 0x1FAA, AF_ADJUST_DOWN }, /* ᾪ */
    +    { 0x1FAB, AF_ADJUST_DOWN }, /* ᾫ */
    +    { 0x1FAC, AF_ADJUST_DOWN }, /* ᾬ */
    +    { 0x1FAD, AF_ADJUST_DOWN }, /* ᾭ */
    +    { 0x1FAE, AF_ADJUST_DOWN }, /* ᾮ */
    +    { 0x1FAF, AF_ADJUST_DOWN }, /* ᾯ */
    +
    +    { 0x1FB0, AF_ADJUST_UP }, /* ᾰ */
    +    { 0x1FB1, AF_ADJUST_UP }, /* ᾱ */
    +    { 0x1FB2, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾲ */
    +    { 0x1FB3, AF_ADJUST_DOWN }, /* ᾳ */
    +    { 0x1FB4, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ᾴ */
    +    { 0x1FB6, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ᾶ */
    +    { 0x1FB7, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ᾷ */
    +    { 0x1FB8, AF_ADJUST_UP }, /* Ᾰ */
    +    { 0x1FB9, AF_ADJUST_UP }, /* Ᾱ */
    +    { 0x1FBC, AF_ADJUST_DOWN }, /* ᾼ */
    +
    +    { 0x1FC2, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ῂ */
    +    { 0x1FC3, AF_ADJUST_DOWN }, /* ῃ */
    +    { 0x1FC4, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ῄ */
    +    { 0x1FC6, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ῆ */
    +    { 0x1FC7, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ῇ */
    +    { 0x1FCC, AF_ADJUST_DOWN }, /* ῌ */
    +
    +    { 0x1FD0, AF_ADJUST_UP }, /* ῐ */
    +    { 0x1FD1, AF_ADJUST_UP }, /* ῑ */
    +    { 0x1FD2, AF_ADJUST_UP2 }, /* ῒ */
    +    { 0x1FD3, AF_ADJUST_UP2 }, /* ΐ */
    +    { 0x1FD6, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ῖ */
    +    { 0x1FD7, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ῗ */
    +    { 0x1FD8, AF_ADJUST_UP }, /* Ῐ */
    +    { 0x1FD9, AF_ADJUST_UP }, /* Ῑ */
    +
    +    { 0x1FE0, AF_ADJUST_UP }, /* ῠ */
    +    { 0x1FE1, AF_ADJUST_UP }, /* ῡ */
    +    { 0x1FE2, AF_ADJUST_UP2 }, /* ῢ */
    +    { 0x1FE3, AF_ADJUST_UP2 }, /* ΰ */
    +    { 0x1FE4, AF_ADJUST_UP }, /* ῤ */
    +    { 0x1FE5, AF_ADJUST_UP }, /* ῥ */
    +    { 0x1FE6, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ῦ */
    +    { 0x1FE7, AF_ADJUST_UP2 | AF_ADJUST_TILDE_TOP }, /* ῧ */
    +    { 0x1FE8, AF_ADJUST_UP }, /* Ῠ */
    +    { 0x1FE9, AF_ADJUST_UP }, /* Ῡ */
    +    { 0x1FF2, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ῲ */
    +    { 0x1FF3, AF_ADJUST_DOWN }, /* ῳ */
    +    { 0x1FF4, AF_ADJUST_UP | AF_ADJUST_DOWN }, /* ῴ */
    +    { 0x1FF6, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP }, /* ῶ */
    +    { 0x1FF7, AF_ADJUST_UP | AF_ADJUST_TILDE_TOP | AF_ADJUST_DOWN }, /* ῷ */
    +    { 0x1FFC, AF_ADJUST_DOWN }, /* ῼ */
    +
    +    /* General Punctuation */
    +    { 0x203C, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ‼ */
    +    { 0x203D, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ‽ */
    +
    +    { 0x2047, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ⁇ */
    +    { 0x2048, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ⁈ */
    +    { 0x2049, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ⁉ */
    +
    +    /* Superscripts and Subscripts */
    +    { 0x2071, AF_ADJUST_UP }, /* ⁱ */
    +
    +    /* Currency Symbols */
    +    { 0x20AB, AF_ADJUST_DOWN }, /* ₫ */
    +
    +    { 0x20C0, AF_ADJUST_DOWN }, /* ⃀ */
    +
    +    /* Number Forms */
    +    { 0x2170, AF_ADJUST_UP }, /* ⅰ */
    +    { 0x2171, AF_ADJUST_UP }, /* ⅱ */
    +    { 0x2172, AF_ADJUST_UP }, /* ⅲ */
    +    { 0x2173, AF_ADJUST_UP }, /* ⅳ */
    +    { 0x2175, AF_ADJUST_UP }, /* ⅵ */
    +    { 0x2176, AF_ADJUST_UP }, /* ⅶ */
    +    { 0x2177, AF_ADJUST_UP }, /* ⅷ */
    +    { 0x2178, AF_ADJUST_UP }, /* ⅸ */
    +    { 0x217A, AF_ADJUST_UP }, /* ⅺ */
    +    { 0x217B, AF_ADJUST_UP }, /* ⅻ */
    +
    +    /* Latin Extended-C */
    +    { 0x2C64, AF_IGNORE_CAPITAL_BOTTOM } , /* Ɽ */
    +    { 0x2C67, AF_IGNORE_CAPITAL_BOTTOM } , /* Ⱨ */
    +    { 0x2C68, AF_IGNORE_SMALL_BOTTOM } , /* ⱨ */
    +    { 0x2C69, AF_IGNORE_CAPITAL_BOTTOM } , /* Ⱪ */
    +    { 0x2C6A, AF_IGNORE_SMALL_BOTTOM } , /* ⱪ */
    +    { 0x2C6B, AF_IGNORE_CAPITAL_BOTTOM } , /* Ⱬ */
    +    { 0x2C6C, AF_IGNORE_SMALL_BOTTOM } , /* ⱬ */
    +    { 0x2C6E, AF_IGNORE_CAPITAL_BOTTOM } , /* Ɱ */
    +
    +    { 0x2C7C, AF_ADJUST_UP }, /* ⱼ */
    +    { 0x2C7E, AF_IGNORE_CAPITAL_BOTTOM } , /* Ȿ */
    +    { 0x2C7F, AF_IGNORE_CAPITAL_BOTTOM } , /* Ɀ */
    +
    +    /* Coptic */
    +    { 0x2CC2, AF_ADJUST_UP }, /* Ⳃ */
    +    { 0x2CC3, AF_ADJUST_UP }, /* ⳃ */
    +
    +    /* Supplemental Punctuation */
    +    { 0x2E18, AF_ADJUST_UP }, /* ⸘ */
    +
    +    { 0x2E2E, AF_ADJUST_UP | AF_ADJUST_NO_HEIGHT_CHECK }, /* ⸮ */
    +
    +    /* Cyrillic Extended-B */
    +    { 0xA640, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꙁ */
    +    { 0xA641, AF_IGNORE_SMALL_BOTTOM } , /* ꙁ */
    +    { 0xA642, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꙃ */
    +    { 0xA643, AF_IGNORE_SMALL_BOTTOM } , /* ꙃ */
    +
    +    { 0xA680, AF_IGNORE_CAPITAL_TOP } , /* Ꚁ */
    +    { 0xA681, AF_IGNORE_SMALL_TOP } , /* ꚁ */
    +    { 0xA688, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꚉ */
    +    { 0xA689, AF_IGNORE_SMALL_BOTTOM } , /* ꚉ */
    +    { 0xA68A, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꚋ */
    +    { 0xA68B, AF_IGNORE_SMALL_BOTTOM } , /* ꚋ */
    +    { 0xA68E, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꚏ */
    +    { 0xA68F, AF_IGNORE_SMALL_BOTTOM } , /* ꚏ */
    +
    +    { 0xA690, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꚑ */
    +    { 0xA691, AF_IGNORE_SMALL_BOTTOM } , /* ꚑ */
    +    { 0xA696, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꚗ */
    +    { 0xA697, AF_IGNORE_SMALL_BOTTOM } , /* ꚗ */
    +
    +    /* Latin Extended-D */
    +    { 0xA726, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꜧ */
    +    { 0xA727, AF_IGNORE_SMALL_BOTTOM } , /* ꜧ */
    +
    +    { 0xA756, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꝗ */
    +    { 0xA758, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꝙ */
    +
    +    { 0xA771, AF_IGNORE_SMALL_BOTTOM } , /* ꝱ */
    +    { 0xA772, AF_IGNORE_SMALL_BOTTOM } , /* ꝲ */
    +    { 0xA773, AF_IGNORE_SMALL_BOTTOM } , /* ꝳ */
    +    { 0xA774, AF_IGNORE_SMALL_BOTTOM } , /* ꝴ */
    +    { 0xA776, AF_IGNORE_SMALL_BOTTOM } , /* ꝶ */
    +
    +    { 0xA790, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꞑ */
    +    { 0xA791, AF_IGNORE_SMALL_BOTTOM } , /* ꞑ */
    +    { 0xA794, AF_IGNORE_SMALL_BOTTOM } , /* ꞔ */
    +    { 0xA795, AF_IGNORE_SMALL_BOTTOM } , /* ꞕ */
    +
    +    { 0xA7C0, AF_IGNORE_CAPITAL_TOP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ꟁ */
    +    { 0xA7C1, AF_IGNORE_SMALL_TOP | AF_IGNORE_SMALL_BOTTOM }, /* ꟁ */
    +    { 0xA7C4, AF_IGNORE_CAPITAL_BOTTOM } , /* Ꞔ */
    +    { 0xA7C5, AF_IGNORE_CAPITAL_BOTTOM } , /* Ʂ */
    +    { 0xA7C6, AF_IGNORE_CAPITAL_BOTTOM } , /* Ᶎ */
    +    { 0xA7CC, AF_IGNORE_CAPITAL_TOP | AF_IGNORE_CAPITAL_BOTTOM }, /* Ꟍ */
    +    { 0xA7CD, AF_IGNORE_SMALL_TOP | AF_IGNORE_SMALL_BOTTOM }, /* ꟍ */
    +
    +    /* Latin Extended-E */
    +    { 0xAB3C, AF_IGNORE_SMALL_BOTTOM } , /* ꬼ */
    +
    +    { 0xAB46, AF_IGNORE_SMALL_BOTTOM } , /* ꭆ */
    +
    +    { 0xAB5C, AF_IGNORE_SMALL_BOTTOM } , /* ꭜ */
    +
    +    { 0xAB66, AF_IGNORE_SMALL_BOTTOM } , /* ꭦ */
    +    { 0xAB67, AF_IGNORE_SMALL_BOTTOM } , /* ꭧ */
    +  };
    +
    +
    +  /*
    +   * Return the OR-ed `AF_*` adjustment flags stored in
    +   * `adjustment_database` for `codepoint`, or zero if the database
    +   * has no entry for it.
    +   */
    +  FT_LOCAL_DEF( FT_UInt32 )
    +  af_adjustment_database_lookup( FT_UInt32  codepoint )
    +  {
    +    /* The database is sorted by code point in ascending order, */
    +    /* so a classic binary search over the index range works.    */
    +    FT_Offset  left  = 0;
    +    FT_Offset  right = AF_ADJUSTMENT_DATABASE_LENGTH - 1;
    +
    +
    +    while ( left <= right )
    +    {
    +      /* overflow-safe midpoint; identical to `( left + right ) / 2` */
    +      /* for the small non-negative indices used here                */
    +      FT_Offset  middle = left + ( right - left ) / 2;
    +      FT_UInt32  entry  = adjustment_database[middle].codepoint;
    +
    +
    +      if ( entry == codepoint )
    +        return adjustment_database[middle].flags;
    +
    +      if ( entry < codepoint )
    +        left = middle + 1;
    +      else
    +        right = middle - 1;
    +    }
    +
    +    /* no database entry for this code point */
    +    return 0;
    +  }
    +
    +
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +
    +  static FT_Error
    +  add_substitute( FT_Int     glyph_idx,
    +                  size_t     value,
    +                  FT_UInt32  codepoint,
    +                  FT_Hash    reverse_map,
    +                  FT_Hash    subst_map,
    +                  FT_Memory  memory )
    +  {
    +    FT_Error  error;
    +
    +    FT_Int  first_substitute = (FT_Int)( value & 0xFFFF );
    +
    +    FT_UInt  used = reverse_map->used;
    +
    +
    +    /*
    +      OpenType features like 'unic' map lowercase letter glyphs to uppercase
    +      forms (and vice versa), which could lead to the use of wrong entries
    +      in the adjustment database.  For this reason we don't overwrite,
    +      prioritizing cmap entries.
    +
    +      XXX Note, however, that this cannot cover all cases since there might
    +      be contradictory entries for glyphs not in the cmap.  A possible
    +      solution might be to specially mark pairs of related lowercase and
    +      uppercase characters in the adjustment database that have diacritics
    +      on different vertical sides (for example, U+0122 'Ģ' and U+0123 'ģ').
    +      The auto-hinter could then perform a topological analysis to do the
    +      right thing.
    +    */
    +    error = ft_hash_num_insert_no_overwrite( first_substitute, codepoint,
    +                                             reverse_map, memory );
    +    if ( error )
    +      return error;
    +
    +    if ( reverse_map->used > used )
    +    {
    +      size_t*  subst = ft_hash_num_lookup( first_substitute, subst_map );
    +
    +
    +      if ( subst )
    +      {
    +        error = add_substitute( first_substitute, *subst, codepoint,
    +                                reverse_map, subst_map, memory );
    +        if ( error )
    +          return error;
    +      }
    +    }
    +
    +    /* The remaining substitutes. */
    +    if ( value & 0xFFFF0000U )
    +    {
    +      FT_UInt  num_substitutes = value >> 16;
    +
    +      FT_UInt  i;
    +
    +
    +      for ( i = 1; i <= num_substitutes; i++ )
    +      {
    +        FT_Int   idx        = glyph_idx + (FT_Int)( i << 16 );
    +        size_t*  substitute = ft_hash_num_lookup( idx, subst_map );
    +
    +
    +        used = reverse_map->used;
    +
    +        error = ft_hash_num_insert_no_overwrite( *substitute,
    +                                                 codepoint,
    +                                                 reverse_map,
    +                                                 memory );
    +        if ( error )
    +          return error;
    +
    +        if ( reverse_map->used > used )
    +        {
    +          size_t*  subst = ft_hash_num_lookup( *substitute, subst_map );
    +
    +
    +          if ( subst )
    +          {
    +            error = add_substitute( *substitute, *subst, codepoint,
    +                                    reverse_map, subst_map, memory );
    +            if ( error )
    +              return error;
    +          }
    +        }
    +      }
    +    }
    +
    +    return FT_Err_Ok;
    +  }
    +
    +#endif /* FT_CONFIG_OPTION_USE_HARFBUZZ */
    +
    +
    +  /* Construct a 'reverse cmap' (i.e., a mapping from glyph indices to   */
    +  /* character codes) for all glyphs that an input code point could turn */
    +  /* into.                                                               */
    +  /*                                                                     */
    +  /* If HarfBuzz support is not available, this is the direct inversion  */
    +  /* of the cmap table, otherwise the mapping gets extended with data    */
    +  /* from the 'GSUB' table.                                              */
    +  FT_LOCAL_DEF( FT_Error )
    +  af_reverse_character_map_new( FT_Hash         *map,
    +                                AF_StyleMetrics  metrics )
    +  {
    +    FT_Error  error;
    +
    +    AF_FaceGlobals  globals = metrics->globals;
    +    FT_Face         face    = globals->face;
    +    FT_Memory       memory  = face->memory;
    +
    +    FT_CharMap  old_charmap;
    +
    +    FT_UInt32  codepoint;
    +    FT_Offset  i;
    +
    +
    +    FT_TRACE4(( "af_reverse_character_map_new:"
    +                " building reverse character map (style `%s')\n",
    +                af_style_names[metrics->style_class->style] ));
    +
    +    /* Search for a unicode charmap.           */
    +    /* If there isn't one, create a blank map. */
    +
    +    /* Back up `face->charmap` because `find_unicode_charmap` sets it. */
    +    old_charmap = face->charmap;
    +
    +    if ( ( error = find_unicode_charmap( face ) ) )
    +      goto Exit;
    +
    +    *map = NULL;
    +    if ( FT_QNEW( *map ) )
    +      goto Exit;
    +
    +    error = ft_hash_num_init( *map, memory );
    +    if ( error )
    +      goto Exit;
    +
    +    /* Initialize reverse cmap with data directly from the cmap table. */
    +    for ( i = 0; i < AF_ADJUSTMENT_DATABASE_LENGTH; i++ )
    +    {
    +      FT_Int  cmap_glyph;
    +
    +
    +      /*
    +        We cannot restrict `codepoint` to character ranges; we have no
    +        control what data the script-specific portion of the GSUB table
    +        actually holds.
    +
    +        An example is `arial.ttf` version 7.00; in this font, there are
    +        lookups for Cyrillic (lookup 43), Greek (lookup 44), and Latin
    +        (lookup 45) that map capital letter glyphs to small capital glyphs.
    +        It is tempting to expect that script-specific versions of the 'c2sc'
    +        feature only use script-specific lookups.  However, this is not the
    +        case in this font: the feature uses all three lookups regardless of
    +        the script.
    +
    +        The auto-hinter, while assigning glyphs to styles, uses the first
    +        coverage result it encounters for a particular glyph.  For example,
    +        if the coverage for Cyrillic is tested before Latin (as is currently
    +        the case), glyphs without a cmap entry that are covered in 'c2sc'
    +        are treated as Cyrillic.
    +
    +        If we now look at glyph 3498, which is a small-caps version of the
    +        Latin character 'A grave' (U+00C0, glyph 172), we can see that it is
    +        registered as belonging to a Cyrillic style due to the algorithm
    +        just described.  As a result, checking only for characters from the
    +        Latin range would miss this glyph; we thus have to test all
    +        character codes in the database.
    +      */
    +      codepoint = adjustment_database[i].codepoint;
    +
    +      cmap_glyph = (FT_Int)FT_Get_Char_Index( face, codepoint );
    +      if ( cmap_glyph == 0 )
    +        continue;
    +
    +      error = ft_hash_num_insert( cmap_glyph, codepoint, *map, memory );
    +      if ( error )
    +        goto Exit;
    +    }
    +
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +
    +    if ( ft_hb_enabled( globals ) )
    +    {
    +      hb_font_t  *hb_font;
    +      hb_face_t  *hb_face;
    +
    +      hb_set_t    *gsub_lookups;
    +      hb_script_t  script;
    +
    +      unsigned int  script_count   = 1;
    +      hb_tag_t      script_tags[2] = { HB_TAG_NONE, HB_TAG_NONE };
    +
    +      FT_Hash  subst_map = NULL;
    +
    +      hb_codepoint_t  idx;
    +      FT_UInt         hash_idx;
    +      FT_Int          glyph_idx;
    +      size_t          value;
    +
    +
    +      /* No need to check whether HarfBuzz has allocation issues; */
    +      /* it continues to work in such cases and simply returns    */
    +      /* 'empty' objects that do nothing.                         */
    +
    +      hb_font = globals->hb_font;
    +      hb_face = hb( font_get_face )( hb_font );
    +
    +      gsub_lookups = hb( set_create )();
    +
    +      script = af_hb_scripts[metrics->style_class->script];
    +
    +      hb( ot_tags_from_script_and_language )( script, NULL,
    +                                              &script_count, script_tags,
    +                                              NULL, NULL );
    +
    +      /* Compute set of all script-specific GSUB lookups. */
    +      hb( ot_layout_collect_lookups )( hb_face,
    +                                       HB_OT_TAG_GSUB,
    +                                       script_tags, NULL, NULL,
    +                                       gsub_lookups );
    +
    +#ifdef FT_DEBUG_LEVEL_TRACE
    +      {
    +        FT_Bool  have_idx = FALSE;
    +
    +
    +        FT_TRACE4(( "  GSUB lookups to check:\n" ));
    +
    +        FT_TRACE4(( "  " ));
    +        idx = HB_SET_VALUE_INVALID;
    +        while ( hb( set_next )( gsub_lookups, &idx ) )
    +          if ( idx < globals->gsub_lookup_count            &&
    +               globals->gsub_lookups_single_alternate[idx] )
    +          {
    +            have_idx = TRUE;
    +            FT_TRACE4(( "  %u", idx ));
    +          }
    +        if ( !have_idx )
    +          FT_TRACE4(( "  (none)" ));
    +        FT_TRACE4(( "\n" ));
    +
    +        FT_TRACE4(( "\n" ));
    +      }
    +#endif
    +
    +      if ( FT_QNEW( subst_map ) )
    +        goto Exit_HarfBuzz;
    +
    +      error = ft_hash_num_init( subst_map, memory );
    +      if ( error )
    +        goto Exit_HarfBuzz;
    +
    +      idx = HB_SET_VALUE_INVALID;
    +      while ( hb( set_next )( gsub_lookups, &idx ) )
    +      {
    +        FT_UInt32  offset;
    +
    +
    +        /* HarfBuzz only validates lookup indices while   */
    +        /* processing lookups, not while collecting them, */
    +        /* so we have to do that by ourselves.            */
    +        if ( idx < globals->gsub_lookup_count )
    +          offset = globals->gsub_lookups_single_alternate[idx];
    +        else
    +          offset = 0;
    +
    +        /* Put all substitutions into a single hash table.  Note that   */
    +        /* the hash values usually contain more than a single character */
    +        /* code; this can happen if different 'SingleSubst' subtables   */
    +        /* map a given glyph index to different substitutions, or if    */
    +        /* 'AlternateSubst' subtable entries are present.               */
    +        if ( offset )
    +          af_map_lookup( globals, subst_map, offset );
    +      }
    +
    +      /*
    +        Now iterate over the collected substitution data in `subst_map`
    +        (using recursion to resolve one-to-many mappings) and insert the
    +        data into the reverse cmap.
    +
    +        As an example, suppose we have the following cmap and substitution
    +        data:
    +
    +          cmap: X -> a
    +                Y -> b
    +                Z -> c
    +
    +          substitutions: a -> b
    +                         b -> c, d
    +                         d -> e
    +
    +        The reverse map now becomes as follows.
    +
    +          a -> X
    +          b -> Y
    +          c -> Z (via cmap, ignoring mapping from 'b')
    +          d -> Y (via 'b')
    +          e -> Y (via 'b' and 'd')
    +      */
    +
    +      hash_idx = 0;
    +      while ( ft_hash_num_iterator( &hash_idx,
    +                                    &glyph_idx,
    +                                    &value,
    +                                    subst_map ) )
    +      {
    +        size_t*  val;
    +
    +
    +        /* Ignore keys that do not point to the first substitute. */
    +        if ( (FT_UInt)glyph_idx & 0xFFFF0000U )
    +          continue;
    +
    +        /* Ignore glyph indices that are not related to accents. */
    +        val = ft_hash_num_lookup( glyph_idx, *map );
    +        if ( !val )
    +          continue;
    +
    +        codepoint = *val;
    +
    +        error = add_substitute( glyph_idx, value, codepoint,
    +                                *map, subst_map, memory );
    +        if ( error )
    +          break;
    +      }
    +
    +    Exit_HarfBuzz:
    +      hb( set_destroy )( gsub_lookups );
    +
    +      ft_hash_num_free( subst_map, memory );
    +      FT_FREE( subst_map );
    +
    +      if ( error )
    +        goto Exit;
    +    }
    +
    +#endif /* FT_CONFIG_OPTION_USE_HARFBUZZ */
    +
    +    FT_TRACE4(( "    reverse character map built successfully"
    +                " with %u entries\n", ( *map )->used ));
    +
    +#ifdef FT_DEBUG_LEVEL_TRACE
    +
    +    {
    +      FT_UInt  cnt;
    +
    +
    +      FT_TRACE7(( "       gidx   code    flags\n" ));
    +               /* "      XXXXX  0xXXXX  XXXXXXXXXXX..." */
    +      FT_TRACE7(( "     ------------------------------\n" ));
    +
    +      for ( cnt = 0; cnt < globals->glyph_count; cnt++ )
    +      {
    +        size_t*    val;
    +        FT_UInt32  adj_type;
    +
    +        const char*  flag_names[] =
    +        {
    +          "up",          /* AF_ADJUST_UP    */
    +          "down",        /* AF_ADJUST_DOWN  */
    +          "double up",   /* AF_ADJUST_UP2   */
    +          "double down", /* AF_ADJUST_DOWN2 */
    +
    +          "top tilde",          /* AF_ADJUST_TILDE_TOP     */
    +          "bottom tilde",       /* AF_ADJUST_TILDE_BOTTOM  */
    +          "below-top tilde",    /* AF_ADJUST_TILDE_TOP2    */
    +          "above-bottom tilde", /* AF_ADJUST_TILDE_BOTTOM2 */
    +
    +          "ignore capital top",    /* AF_IGNORE_CAPITAL_TOP    */
    +          "ignore capital bottom", /* AF_IGNORE_CAPITAL_BOTTOM */
    +          "ignore small top",      /* AF_IGNORE_SMALL_TOP      */
    +          "ignore small bottom",   /* AF_IGNORE_SMALL_BOTTOM   */
    +        };
    +        size_t  flag_names_size = sizeof ( flag_names ) / sizeof ( char* );
    +
    +        char  flag_str[256];
    +        int   need_comma;
    +
    +        size_t  j;
    +
    +
    +        val = ft_hash_num_lookup( (FT_Int)cnt, *map );
    +        if ( !val )
    +          continue;
    +        codepoint = *val;
    +
    +        adj_type = af_adjustment_database_lookup( codepoint );
    +        if ( !adj_type )
    +          continue;
    +
    +        flag_str[0] = '\0';
    +        need_comma  = 0;
    +
    +        for ( j = 0; j < flag_names_size; j++ )
    +        {
    +          if ( adj_type & (1 << j ) )
    +          {
    +            if ( !need_comma )
    +              need_comma = 1;
    +            else
    +              strcat( flag_str, ", " );
    +            strcat( flag_str, flag_names[j] );
    +          }
    +        }
    +
    +        FT_TRACE7(( "      %5u  0x%04X  %s\n", cnt, codepoint, flag_str ));
    +      }
    +    }
    +
    +#endif /* FT_DEBUG_LEVEL_TRACE */
    +
    +
    +  Exit:
    +    face->charmap = old_charmap;
    +
    +    if ( error )
    +    {
    +      FT_TRACE4(( "    error while building reverse character map."
    +                  " Using blank map.\n" ));
    +
    +      if ( *map )
    +        ft_hash_num_free( *map, memory );
    +
    +      FT_FREE( *map );
    +      *map = NULL;
    +      return error;
    +    }
    +
    +    return FT_Err_Ok;
    +  }
    +
    +
    +  FT_LOCAL_DEF( FT_Error )
    +  af_reverse_character_map_done( FT_Hash    map,
    +                                 FT_Memory  memory )
    +  {
    +    if ( map )
    +      ft_hash_num_free( map, memory );
    +    FT_FREE( map );
    +
    +    return FT_Err_Ok;
    +  }
    +
    +
    +/* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afadjust.h b/src/java.desktop/share/native/libfreetype/src/autofit/afadjust.h
    new file mode 100644
    index 00000000000..4837451ae4c
    --- /dev/null
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afadjust.h
    @@ -0,0 +1,130 @@
    +/****************************************************************************
    + *
    + * afadjust.h
    + *
    + *   Auto-fitter routines to adjust components based on charcode (header).
    + *
    + * Copyright (C) 2023-2025 by
    + * David Turner, Robert Wilhelm, and Werner Lemberg.
    + *
    + * Written by Craig White <gerzytet@gmail.com>.
    + *
    + * This file is part of the FreeType project, and may only be used,
    + * modified, and distributed under the terms of the FreeType project
    + * license, LICENSE.TXT.  By continuing to use, modify, or distribute
    + * this file you indicate that you have read the license and
    + * understand and accept it fully.
    + *
    + */
    +
    +
    +#ifndef AFADJUST_H_
    +#define AFADJUST_H_
    +
    +#include <freetype/fttypes.h>
    +
    +#include "afglobal.h"
    +#include "aftypes.h"
    +
    +
    +FT_BEGIN_HEADER
    +
    +  /*
    +   * Adjustment type flags.
    +   *
    +   * They also specify topological constraints that the auto-hinter relies
    +   * on.  For example, using `AF_ADJUST_UP` implies that we have two
    +   * enclosing contours, one for the base glyph and one for the diacritic
    +   * above, and no other contour inbetween or above.  With 'enclosing' it is
    +   * meant that such a contour can contain more inner contours.
    +   *
    +   */
    +
    +  /* Find the topmost contour and push it up until its lowest point is */
    +  /* one pixel above the highest point not enclosed by that contour.   */
    +#define AF_ADJUST_UP  0x01
    +
    +  /* Find the bottommost contour and push it down until its highest point */
    +  /* is one pixel below the lowest point not enclosed by that contour.    */
    +#define AF_ADJUST_DOWN  0x02
    +
    +  /* Find the contour below the topmost contour and push it up, together */
    +  /* with the topmost contour, until its lowest point is one pixel above */
    +  /* the highest point not enclosed by that contour.  This flag is       */
    +  /* mutually exclusive with `AF_ADJUST_UP`.                             */
    +#define AF_ADJUST_UP2  0x04
    +
    +  /* Find the contour above the bottommost contour and push it down,  */
    +  /* together with the bottommost contour, until its highest point is */
    +  /* one pixel below the lowest point not enclosed by that contour.   */
    +  /* This flag is mutually exclusive with `AF_ADJUST_DOWN`.           */
    +#define AF_ADJUST_DOWN2  0x08
    +
    +  /* The topmost contour is a tilde.  Enlarge it vertically so that it    */
    +  /* stays legible at small sizes, not degenerating to a horizontal line. */
    +#define AF_ADJUST_TILDE_TOP  0x10
    +
    +  /* The bottommost contour is a tilde.  Enlarge it vertically so that it */
    +  /* stays legible at small sizes, not degenerating to a horizontal line. */
    +#define AF_ADJUST_TILDE_BOTTOM  0x20
    +
    +  /* The contour below the topmost contour is a tilde.  Enlarge it        */
    +  /* vertically so that it stays legible at small sizes, not degenerating */
    +  /* to a horizontal line.  To be used with `AF_ADJUST_UP2` only.         */
    +#define AF_ADJUST_TILDE_TOP2  0x40
    +
    +  /* The contour above the bottommost contour is a tilde.  Enlarge it     */
    +  /* vertically so that it stays legible at small sizes, not degenerating */
    +  /* to a horizontal line.  To be used with `AF_ADJUST_DOWN2` only.       */
    +#define AF_ADJUST_TILDE_BOTTOM2  0x80
    +
    +  /* Make the auto-hinter ignore any diacritic (either a separate contour */
    +  /* or part of the base character outline) that is attached to the top   */
    +  /* of an uppercase base character.                                      */
    +#define AF_IGNORE_CAPITAL_TOP  0x100
    +
    +  /* Make the auto-hinter ignore any diacritic (either a separate contour */
    +  /* or part of the base character outline) that is attached to the       */
    +  /* bottom of an uppercase base character.                               */
    +#define AF_IGNORE_CAPITAL_BOTTOM  0x200
    +
    +  /* Make the auto-hinter ignore any diacritic (either a separate contour */
    +  /* or part of the base character outline) that is attached to the top   */
    +  /* of a lowercase base character.                                       */
    +#define AF_IGNORE_SMALL_TOP  0x400
    +
    +  /* Make the auto-hinter ignore any diacritic (either a separate contour */
    +  /* or part of the base character outline) that is attached to the       */
    +  /* bottom of a lowercase base character.                                */
    +#define AF_IGNORE_SMALL_BOTTOM  0x800
    +
    +  /* By default, the AF_ADJUST_XXX flags are applied only if diacritics */
    +  /* have a 'small' height (based on some heuristic checks).  If this   */
    +  /* flag is set, no such check is performed.                           */
    +#define AF_ADJUST_NO_HEIGHT_CHECK  0x1000
    +
    +  /* No adjustment, i.e., no flag is set. */
    +#define AF_ADJUST_NONE  0x00
    +
    +
    +  FT_LOCAL( FT_UInt32 )
    +  af_adjustment_database_lookup( FT_UInt32  codepoint );
    +
    +  /* Allocate and populate the reverse character map, */
    +  /* using the character map within the face.         */
    +  FT_LOCAL( FT_Error )
    +  af_reverse_character_map_new( FT_Hash         *map,
    +                                AF_StyleMetrics  metrics );
    +
    +  /* Free the reverse character map. */
    +  FT_LOCAL( FT_Error )
    +  af_reverse_character_map_done( FT_Hash    map,
    +                                 FT_Memory  memory );
    +
    +
    +FT_END_HEADER
    +
    +#endif /* AFADJUST_H_ */
    +
    +
    +/* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.c b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.c
    index ea83969cdc9..a6219bdfe41 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.c
    @@ -7,7 +7,7 @@
      *
      *   Auto-fitter data for blue strings (body).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -467,24 +467,24 @@
       af_blue_stringsets[] =
       {
         /* */
    -    { AF_BLUE_STRING_ADLAM_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_ADLAM_CAPITAL_BOTTOM, 0                                 },
    +    { AF_BLUE_STRING_ADLAM_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_ADLAM_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
         { AF_BLUE_STRING_ADLAM_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_ADLAM_SMALL_BOTTOM,   0                                 },
    -    { AF_BLUE_STRING_MAX,                  0                                 },
    +                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_ADLAM_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_MAX,                  0                                     },
         { AF_BLUE_STRING_ARABIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP     },
         { AF_BLUE_STRING_ARABIC_BOTTOM, 0                              },
         { AF_BLUE_STRING_ARABIC_JOIN,   AF_BLUE_PROPERTY_LATIN_NEUTRAL },
         { AF_BLUE_STRING_MAX,           0                              },
    -    { AF_BLUE_STRING_ARMENIAN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_ARMENIAN_CAPITAL_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_ARMENIAN_SMALL_ASCENDER,  AF_BLUE_PROPERTY_LATIN_TOP        },
    +    { AF_BLUE_STRING_ARMENIAN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_ARMENIAN_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_ARMENIAN_SMALL_ASCENDER,  AF_BLUE_PROPERTY_LATIN_TOP            },
         { AF_BLUE_STRING_ARMENIAN_SMALL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_ARMENIAN_SMALL_BOTTOM,    0                                 },
    -    { AF_BLUE_STRING_ARMENIAN_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MAX,                      0                                 },
    +                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_ARMENIAN_SMALL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_ARMENIAN_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MAX,                      0                                     },
         { AF_BLUE_STRING_AVESTAN_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_AVESTAN_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,            0                          },
    @@ -508,14 +508,14 @@
         { AF_BLUE_STRING_CHAKMA_BOTTOM,    0                          },
         { AF_BLUE_STRING_CHAKMA_DESCENDER, 0                          },
         { AF_BLUE_STRING_MAX,              0                          },
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_TOP,          AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_BOTTOM,       0                                 },
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_TOP,          AF_BLUE_PROPERTY_LATIN_TOP          },
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_BOTTOM,       0                                   },
         { AF_BLUE_STRING_CANADIAN_SYLLABICS_SMALL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                      AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SMALL_BOTTOM, 0                                 },
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_MAX,                             0                                 },
    +                                                      AF_BLUE_PROPERTY_LATIN_X_HEIGHT     },
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SMALL_BOTTOM, AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM },
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_TOP,     AF_BLUE_PROPERTY_LATIN_TOP          },
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_BOTTOM,  0                                   },
    +    { AF_BLUE_STRING_MAX,                             0                                   },
         { AF_BLUE_STRING_CARIAN_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_CARIAN_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,           0                          },
    @@ -527,24 +527,24 @@
         { AF_BLUE_STRING_CHEROKEE_SMALL,           0                                 },
         { AF_BLUE_STRING_CHEROKEE_SMALL_DESCENDER, 0                                 },
         { AF_BLUE_STRING_MAX,                      0                                 },
    -    { AF_BLUE_STRING_COPTIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_COPTIC_CAPITAL_BOTTOM, 0                                 },
    +    { AF_BLUE_STRING_COPTIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_COPTIC_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
         { AF_BLUE_STRING_COPTIC_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_COPTIC_SMALL_BOTTOM,   0                                 },
    -    { AF_BLUE_STRING_MAX,                   0                                 },
    +                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_COPTIC_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_MAX,                   0                                     },
         { AF_BLUE_STRING_CYPRIOT_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_CYPRIOT_BOTTOM, 0                          },
         { AF_BLUE_STRING_CYPRIOT_SMALL,  AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_CYPRIOT_SMALL,  0                          },
         { AF_BLUE_STRING_MAX,            0                          },
    -    { AF_BLUE_STRING_CYRILLIC_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_CYRILLIC_CAPITAL_BOTTOM,  0                                 },
    +    { AF_BLUE_STRING_CYRILLIC_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_CYRILLIC_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
         { AF_BLUE_STRING_CYRILLIC_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_CYRILLIC_SMALL,           0                                 },
    -    { AF_BLUE_STRING_CYRILLIC_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MAX,                      0                                 },
    +                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_CYRILLIC_SMALL,           0                                     },
    +    { AF_BLUE_STRING_CYRILLIC_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MAX,                      0                                     },
         { AF_BLUE_STRING_DEVANAGARI_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        },
         { AF_BLUE_STRING_DEVANAGARI_HEAD,   AF_BLUE_PROPERTY_LATIN_TOP        },
         { AF_BLUE_STRING_DEVANAGARI_BASE,   AF_BLUE_PROPERTY_LATIN_TOP      |
    @@ -553,12 +553,12 @@
         { AF_BLUE_STRING_DEVANAGARI_BASE,   0                                 },
         { AF_BLUE_STRING_DEVANAGARI_BOTTOM, 0                                 },
         { AF_BLUE_STRING_MAX,               0                                 },
    -    { AF_BLUE_STRING_DESERET_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_DESERET_CAPITAL_BOTTOM, 0                                 },
    +    { AF_BLUE_STRING_DESERET_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_DESERET_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
         { AF_BLUE_STRING_DESERET_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                             AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_DESERET_SMALL_BOTTOM,   0                                 },
    -    { AF_BLUE_STRING_MAX,                    0                                 },
    +                                             AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_DESERET_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_MAX,                    0                                     },
         { AF_BLUE_STRING_ETHIOPIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_ETHIOPIC_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,             0                          },
    @@ -578,23 +578,23 @@
         { AF_BLUE_STRING_GEORGIAN_NUSKHURI_ASCENDER,  AF_BLUE_PROPERTY_LATIN_TOP        },
         { AF_BLUE_STRING_GEORGIAN_NUSKHURI_DESCENDER, 0                                 },
         { AF_BLUE_STRING_MAX,                         0                                 },
    -    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_BOTTOM, 0                                 },
    +    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
         { AF_BLUE_STRING_GLAGOLITIC_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_GLAGOLITIC_SMALL_BOTTOM,   0                                 },
    -    { AF_BLUE_STRING_MAX,                       0                                 },
    +                                                AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_GLAGOLITIC_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_MAX,                       0                                     },
         { AF_BLUE_STRING_GOTHIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_GOTHIC_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,           0                          },
    -    { AF_BLUE_STRING_GREEK_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_GREEK_CAPITAL_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_GREEK_SMALL_BETA_TOP,  AF_BLUE_PROPERTY_LATIN_TOP        },
    +    { AF_BLUE_STRING_GREEK_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_GREEK_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_GREEK_SMALL_BETA_TOP,  AF_BLUE_PROPERTY_LATIN_TOP            },
         { AF_BLUE_STRING_GREEK_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_GREEK_SMALL,           0                                 },
    -    { AF_BLUE_STRING_GREEK_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MAX,                   0                                 },
    +                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_GREEK_SMALL,           0                                     },
    +    { AF_BLUE_STRING_GREEK_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MAX,                   0                                     },
         { AF_BLUE_STRING_GUJARATI_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
                                              AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
         { AF_BLUE_STRING_GUJARATI_BOTTOM,    0                                 },
    @@ -643,45 +643,45 @@
         { AF_BLUE_STRING_LAO_LARGE_ASCENDER, AF_BLUE_PROPERTY_LATIN_TOP        },
         { AF_BLUE_STRING_LAO_DESCENDER,      0                                 },
         { AF_BLUE_STRING_MAX,                0                                 },
    -    { AF_BLUE_STRING_LATIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_LATIN_CAPITAL_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_LATIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    +    { AF_BLUE_STRING_LATIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_LATIN_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_LATIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
         { AF_BLUE_STRING_LATIN_SMALL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_LATIN_SMALL_BOTTOM,    0                                 },
    -    { AF_BLUE_STRING_LATIN_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MAX,                   0                                 },
    -    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_LATIN_SUBS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    +                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_LATIN_SMALL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_LATIN_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MAX,                   0                                     },
    +    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_LATIN_SUBS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
         { AF_BLUE_STRING_LATIN_SUBS_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_LATIN_SUBS_SMALL,           0                                 },
    -    { AF_BLUE_STRING_LATIN_SUBS_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MAX,                        0                                 },
    -    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_LATIN_SUPS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    +                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_LATIN_SUBS_SMALL,           0                                     },
    +    { AF_BLUE_STRING_LATIN_SUBS_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MAX,                        0                                     },
    +    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_LATIN_SUPS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
         { AF_BLUE_STRING_LATIN_SUPS_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_LATIN_SUPS_SMALL,           0                                 },
    -    { AF_BLUE_STRING_LATIN_SUPS_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MAX,                        0                                 },
    +                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_LATIN_SUPS_SMALL,           0                                     },
    +    { AF_BLUE_STRING_LATIN_SUPS_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MAX,                        0                                     },
         { AF_BLUE_STRING_LISU_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_LISU_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,         0                          },
         { AF_BLUE_STRING_MALAYALAM_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_MALAYALAM_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,              0                          },
    -    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_BOTTOM,  0                                 },
    -    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        },
    +    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            },
         { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                  AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_BOTTOM,    0                                 },
    -    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_DESCENDER, 0                                 },
    -    { AF_BLUE_STRING_MEDEFAIDRIN_DIGIT_TOP,       AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_MAX,                         0                                 },
    +                                                  AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_MEDEFAIDRIN_DIGIT_TOP,       AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_MAX,                         0                                     },
         { AF_BLUE_STRING_MONGOLIAN_TOP_BASE,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_MONGOLIAN_BOTTOM_BASE, 0                          },
         { AF_BLUE_STRING_MAX,                   0                          },
    @@ -691,12 +691,12 @@
         { AF_BLUE_STRING_MYANMAR_ASCENDER,  AF_BLUE_PROPERTY_LATIN_TOP        },
         { AF_BLUE_STRING_MYANMAR_DESCENDER, 0                                 },
         { AF_BLUE_STRING_MAX,               0                                 },
    -    { AF_BLUE_STRING_NKO_TOP,          AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_NKO_BOTTOM,       0                                 },
    +    { AF_BLUE_STRING_NKO_TOP,          AF_BLUE_PROPERTY_LATIN_TOP          },
    +    { AF_BLUE_STRING_NKO_BOTTOM,       0                                   },
         { AF_BLUE_STRING_NKO_SMALL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                       AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_NKO_SMALL_BOTTOM, 0                                 },
    -    { AF_BLUE_STRING_MAX,              0                                 },
    +                                       AF_BLUE_PROPERTY_LATIN_X_HEIGHT     },
    +    { AF_BLUE_STRING_NKO_SMALL_BOTTOM, AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM },
    +    { AF_BLUE_STRING_MAX,              0                                   },
         { AF_BLUE_STRING_MAX, 0 },
         { AF_BLUE_STRING_OL_CHIKI, AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_OL_CHIKI, 0                          },
    @@ -704,15 +704,15 @@
         { AF_BLUE_STRING_OLD_TURKIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_OLD_TURKIC_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,               0                          },
    -    { AF_BLUE_STRING_OSAGE_CAPITAL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP       },
    -    { AF_BLUE_STRING_OSAGE_CAPITAL_BOTTOM,    0                                },
    -    { AF_BLUE_STRING_OSAGE_CAPITAL_DESCENDER, 0                                },
    -    { AF_BLUE_STRING_OSAGE_SMALL_TOP,         AF_BLUE_PROPERTY_LATIN_TOP     |
    -                                              AF_BLUE_PROPERTY_LATIN_X_HEIGHT  },
    -    { AF_BLUE_STRING_OSAGE_SMALL_BOTTOM,      0                                },
    -    { AF_BLUE_STRING_OSAGE_SMALL_ASCENDER,    AF_BLUE_PROPERTY_LATIN_TOP       },
    -    { AF_BLUE_STRING_OSAGE_SMALL_DESCENDER,   0                                },
    -    { AF_BLUE_STRING_MAX,                     0                                },
    +    { AF_BLUE_STRING_OSAGE_CAPITAL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_OSAGE_CAPITAL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM },
    +    { AF_BLUE_STRING_OSAGE_CAPITAL_DESCENDER, 0                                     },
    +    { AF_BLUE_STRING_OSAGE_SMALL_TOP,         AF_BLUE_PROPERTY_LATIN_TOP      |
    +                                              AF_BLUE_PROPERTY_LATIN_X_HEIGHT       },
    +    { AF_BLUE_STRING_OSAGE_SMALL_BOTTOM,      AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   },
    +    { AF_BLUE_STRING_OSAGE_SMALL_ASCENDER,    AF_BLUE_PROPERTY_LATIN_TOP            },
    +    { AF_BLUE_STRING_OSAGE_SMALL_DESCENDER,   0                                     },
    +    { AF_BLUE_STRING_MAX,                     0                                     },
         { AF_BLUE_STRING_OSMANYA_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_OSMANYA_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,            0                          },
    @@ -723,13 +723,13 @@
         { AF_BLUE_STRING_SAURASHTRA_TOP,    AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_SAURASHTRA_BOTTOM, 0                          },
         { AF_BLUE_STRING_MAX,               0                          },
    -    { AF_BLUE_STRING_SHAVIAN_TOP,          AF_BLUE_PROPERTY_LATIN_TOP        },
    -    { AF_BLUE_STRING_SHAVIAN_BOTTOM,       0                                 },
    -    { AF_BLUE_STRING_SHAVIAN_DESCENDER,    0                                 },
    +    { AF_BLUE_STRING_SHAVIAN_TOP,          AF_BLUE_PROPERTY_LATIN_TOP          },
    +    { AF_BLUE_STRING_SHAVIAN_BOTTOM,       0                                   },
    +    { AF_BLUE_STRING_SHAVIAN_DESCENDER,    0                                   },
         { AF_BLUE_STRING_SHAVIAN_SMALL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT   },
    -    { AF_BLUE_STRING_SHAVIAN_SMALL_BOTTOM, 0                                 },
    -    { AF_BLUE_STRING_MAX,                  0                                 },
    +                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT     },
    +    { AF_BLUE_STRING_SHAVIAN_SMALL_BOTTOM, AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM },
    +    { AF_BLUE_STRING_MAX,                  0                                   },
         { AF_BLUE_STRING_SINHALA_TOP,       AF_BLUE_PROPERTY_LATIN_TOP },
         { AF_BLUE_STRING_SINHALA_BOTTOM,    0                          },
         { AF_BLUE_STRING_SINHALA_DESCENDER, 0                          },
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.cin b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.cin
    index d2270fac744..786c6b3b9e6 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.cin
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.cin
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter data for blue strings (body).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.dat b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.dat
    index 88bab2632ab..f6e96ff8189 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.dat
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.dat
    @@ -2,7 +2,7 @@
     //
     //   Auto-fitter data for blue strings.
     //
    -// Copyright (C) 2013-2024 by
    +// Copyright (C) 2013-2025 by
     // David Turner, Robert Wilhelm, and Werner Lemberg.
     //
     // This file is part of the FreeType project, and may only be used,
    @@ -699,12 +699,12 @@ AF_BLUE_STRING_ENUM AF_BLUE_STRINGS_ARRAY AF_BLUE_STRING_MAX_LEN:
     AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
     
       AF_BLUE_STRINGSET_ADLM
    -    { AF_BLUE_STRING_ADLAM_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_ADLAM_CAPITAL_BOTTOM, 0                                 }
    +    { AF_BLUE_STRING_ADLAM_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_ADLAM_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
         { AF_BLUE_STRING_ADLAM_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_ADLAM_SMALL_BOTTOM,   0                                 }
    -    { AF_BLUE_STRING_MAX,                  0                                 }
    +                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_ADLAM_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_MAX,                  0                                     }
     
       AF_BLUE_STRINGSET_ARAB
         { AF_BLUE_STRING_ARABIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP     }
    @@ -713,14 +713,14 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,           0                              }
     
       AF_BLUE_STRINGSET_ARMN
    -    { AF_BLUE_STRING_ARMENIAN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_ARMENIAN_CAPITAL_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_ARMENIAN_SMALL_ASCENDER,  AF_BLUE_PROPERTY_LATIN_TOP        }
    +    { AF_BLUE_STRING_ARMENIAN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_ARMENIAN_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_ARMENIAN_SMALL_ASCENDER,  AF_BLUE_PROPERTY_LATIN_TOP            }
         { AF_BLUE_STRING_ARMENIAN_SMALL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_ARMENIAN_SMALL_BOTTOM,    0                                 }
    -    { AF_BLUE_STRING_ARMENIAN_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MAX,                      0                                 }
    +                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_ARMENIAN_SMALL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_ARMENIAN_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MAX,                      0                                     }
     
       AF_BLUE_STRINGSET_AVST
         { AF_BLUE_STRING_AVESTAN_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -756,14 +756,14 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,              0                          }
     
       AF_BLUE_STRINGSET_CANS
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_TOP,          AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_BOTTOM,       0                                 }
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_TOP,          AF_BLUE_PROPERTY_LATIN_TOP          }
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_BOTTOM,       0                                   }
         { AF_BLUE_STRING_CANADIAN_SYLLABICS_SMALL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                      AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SMALL_BOTTOM, 0                                 }
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_MAX,                             0                                 }
    +                                                      AF_BLUE_PROPERTY_LATIN_X_HEIGHT     }
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SMALL_BOTTOM, AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM }
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_TOP,     AF_BLUE_PROPERTY_LATIN_TOP          }
    +    { AF_BLUE_STRING_CANADIAN_SYLLABICS_SUPS_BOTTOM,  0                                   }
    +    { AF_BLUE_STRING_MAX,                             0                                   }
     
       AF_BLUE_STRINGSET_CARI
         { AF_BLUE_STRING_CARIAN_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -781,12 +781,12 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,                      0                                 }
     
       AF_BLUE_STRINGSET_COPT
    -    { AF_BLUE_STRING_COPTIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_COPTIC_CAPITAL_BOTTOM, 0                                 }
    +    { AF_BLUE_STRING_COPTIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_COPTIC_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
         { AF_BLUE_STRING_COPTIC_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_COPTIC_SMALL_BOTTOM,   0                                 }
    -    { AF_BLUE_STRING_MAX,                   0                                 }
    +                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_COPTIC_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_MAX,                   0                                     }
     
       AF_BLUE_STRINGSET_CPRT
         { AF_BLUE_STRING_CYPRIOT_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -796,13 +796,13 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,            0                          }
     
       AF_BLUE_STRINGSET_CYRL
    -    { AF_BLUE_STRING_CYRILLIC_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_CYRILLIC_CAPITAL_BOTTOM,  0                                 }
    +    { AF_BLUE_STRING_CYRILLIC_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_CYRILLIC_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
         { AF_BLUE_STRING_CYRILLIC_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_CYRILLIC_SMALL,           0                                 }
    -    { AF_BLUE_STRING_CYRILLIC_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MAX,                      0                                 }
    +                                               AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_CYRILLIC_SMALL,           0                                     }
    +    { AF_BLUE_STRING_CYRILLIC_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MAX,                      0                                     }
     
       AF_BLUE_STRINGSET_DEVA
         { AF_BLUE_STRING_DEVANAGARI_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        }
    @@ -815,12 +815,12 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,               0                                 }
     
       AF_BLUE_STRINGSET_DSRT
    -    { AF_BLUE_STRING_DESERET_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_DESERET_CAPITAL_BOTTOM, 0                                 }
    +    { AF_BLUE_STRING_DESERET_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_DESERET_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
         { AF_BLUE_STRING_DESERET_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                             AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_DESERET_SMALL_BOTTOM,   0                                 }
    -    { AF_BLUE_STRING_MAX,                    0                                 }
    +                                             AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_DESERET_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_MAX,                    0                                     }
     
       AF_BLUE_STRINGSET_ETHI
         { AF_BLUE_STRING_ETHIOPIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -848,12 +848,12 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,                         0                                 }
     
       AF_BLUE_STRINGSET_GLAG
    -    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_BOTTOM, 0                                 }
    +    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_GLAGOLITIC_CAPITAL_BOTTOM, AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
         { AF_BLUE_STRING_GLAGOLITIC_SMALL_TOP,      AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_GLAGOLITIC_SMALL_BOTTOM,   0                                 }
    -    { AF_BLUE_STRING_MAX,                       0                                 }
    +                                                AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_GLAGOLITIC_SMALL_BOTTOM,   AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_MAX,                       0                                     }
     
       AF_BLUE_STRINGSET_GOTH
         { AF_BLUE_STRING_GOTHIC_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -861,14 +861,14 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,           0                          }
     
       AF_BLUE_STRINGSET_GREK
    -    { AF_BLUE_STRING_GREEK_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_GREEK_CAPITAL_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_GREEK_SMALL_BETA_TOP,  AF_BLUE_PROPERTY_LATIN_TOP        }
    +    { AF_BLUE_STRING_GREEK_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_GREEK_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_GREEK_SMALL_BETA_TOP,  AF_BLUE_PROPERTY_LATIN_TOP            }
         { AF_BLUE_STRING_GREEK_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_GREEK_SMALL,           0                                 }
    -    { AF_BLUE_STRING_GREEK_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MAX,                   0                                 }
    +                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_GREEK_SMALL,           0                                     }
    +    { AF_BLUE_STRING_GREEK_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MAX,                   0                                     }
     
       AF_BLUE_STRINGSET_GUJR
         { AF_BLUE_STRING_GUJARATI_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    @@ -935,34 +935,34 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,                0                                 }
     
       AF_BLUE_STRINGSET_LATN
    -    { AF_BLUE_STRING_LATIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_LATIN_CAPITAL_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_LATIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    +    { AF_BLUE_STRING_LATIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_LATIN_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_LATIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
         { AF_BLUE_STRING_LATIN_SMALL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_LATIN_SMALL_BOTTOM,    0                                 }
    -    { AF_BLUE_STRING_LATIN_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MAX,                   0                                 }
    +                                            AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_LATIN_SMALL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_LATIN_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MAX,                   0                                     }
     
       AF_BLUE_STRINGSET_LATB
    -    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_LATIN_SUBS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    +    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_LATIN_SUBS_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_LATIN_SUBS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
         { AF_BLUE_STRING_LATIN_SUBS_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_LATIN_SUBS_SMALL,           0                                 }
    -    { AF_BLUE_STRING_LATIN_SUBS_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MAX,                        0                                 }
    +                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_LATIN_SUBS_SMALL,           0                                     }
    +    { AF_BLUE_STRING_LATIN_SUBS_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MAX,                        0                                     }
     
       AF_BLUE_STRINGSET_LATP
    -    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_LATIN_SUPS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    +    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_LATIN_SUPS_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_LATIN_SUPS_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
         { AF_BLUE_STRING_LATIN_SUPS_SMALL,           AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_LATIN_SUPS_SMALL,           0                                 }
    -    { AF_BLUE_STRING_LATIN_SUPS_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MAX,                        0                                 }
    +                                                 AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_LATIN_SUPS_SMALL,           0                                     }
    +    { AF_BLUE_STRING_LATIN_SUPS_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MAX,                        0                                     }
     
       AF_BLUE_STRINGSET_LISU
         { AF_BLUE_STRING_LISU_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -975,15 +975,15 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,              0                          }
     
       AF_BLUE_STRINGSET_MEDF
    -    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_BOTTOM,  0                                 }
    -    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP        }
    +    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_MEDEFAIDRIN_CAPITAL_BOTTOM,  AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_F_TOP,     AF_BLUE_PROPERTY_LATIN_TOP            }
         { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                                  AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_BOTTOM,    0                                 }
    -    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_DESCENDER, 0                                 }
    -    { AF_BLUE_STRING_MEDEFAIDRIN_DIGIT_TOP,       AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_MAX,                         0                                 }
    +                                                  AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_MEDEFAIDRIN_SMALL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_MEDEFAIDRIN_DIGIT_TOP,       AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_MAX,                         0                                     }
     
       AF_BLUE_STRINGSET_MONG
         { AF_BLUE_STRING_MONGOLIAN_TOP_BASE,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -999,12 +999,12 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,               0                                 }
     
       AF_BLUE_STRINGSET_NKOO
    -    { AF_BLUE_STRING_NKO_TOP,          AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_NKO_BOTTOM,       0                                 }
    +    { AF_BLUE_STRING_NKO_TOP,          AF_BLUE_PROPERTY_LATIN_TOP          }
    +    { AF_BLUE_STRING_NKO_BOTTOM,       0                                   }
         { AF_BLUE_STRING_NKO_SMALL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                       AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_NKO_SMALL_BOTTOM, 0                                 }
    -    { AF_BLUE_STRING_MAX,              0                                 }
    +                                       AF_BLUE_PROPERTY_LATIN_X_HEIGHT     }
    +    { AF_BLUE_STRING_NKO_SMALL_BOTTOM, AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM }
    +    { AF_BLUE_STRING_MAX,              0                                   }
     
       AF_BLUE_STRINGSET_NONE
         { AF_BLUE_STRING_MAX, 0 }
    @@ -1020,15 +1020,15 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,               0                          }
     
       AF_BLUE_STRINGSET_OSGE
    -    { AF_BLUE_STRING_OSAGE_CAPITAL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP       }
    -    { AF_BLUE_STRING_OSAGE_CAPITAL_BOTTOM,    0                                }
    -    { AF_BLUE_STRING_OSAGE_CAPITAL_DESCENDER, 0                                }
    -    { AF_BLUE_STRING_OSAGE_SMALL_TOP,         AF_BLUE_PROPERTY_LATIN_TOP     |
    -                                              AF_BLUE_PROPERTY_LATIN_X_HEIGHT  }
    -    { AF_BLUE_STRING_OSAGE_SMALL_BOTTOM,      0                                }
    -    { AF_BLUE_STRING_OSAGE_SMALL_ASCENDER,    AF_BLUE_PROPERTY_LATIN_TOP       }
    -    { AF_BLUE_STRING_OSAGE_SMALL_DESCENDER,   0                                }
    -    { AF_BLUE_STRING_MAX,                     0                                }
    +    { AF_BLUE_STRING_OSAGE_CAPITAL_TOP,       AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_OSAGE_CAPITAL_BOTTOM,    AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM }
    +    { AF_BLUE_STRING_OSAGE_CAPITAL_DESCENDER, 0                                     }
    +    { AF_BLUE_STRING_OSAGE_SMALL_TOP,         AF_BLUE_PROPERTY_LATIN_TOP      |
    +                                              AF_BLUE_PROPERTY_LATIN_X_HEIGHT       }
    +    { AF_BLUE_STRING_OSAGE_SMALL_BOTTOM,      AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM   }
    +    { AF_BLUE_STRING_OSAGE_SMALL_ASCENDER,    AF_BLUE_PROPERTY_LATIN_TOP            }
    +    { AF_BLUE_STRING_OSAGE_SMALL_DESCENDER,   0                                     }
    +    { AF_BLUE_STRING_MAX,                     0                                     }
     
       AF_BLUE_STRINGSET_OSMA
         { AF_BLUE_STRING_OSMANYA_TOP,    AF_BLUE_PROPERTY_LATIN_TOP }
    @@ -1047,13 +1047,13 @@ AF_BLUE_STRINGSET_ENUM AF_BLUE_STRINGSETS_ARRAY AF_BLUE_STRINGSET_MAX_LEN:
         { AF_BLUE_STRING_MAX,               0                          }
     
       AF_BLUE_STRINGSET_SHAW
    -    { AF_BLUE_STRING_SHAVIAN_TOP,          AF_BLUE_PROPERTY_LATIN_TOP        }
    -    { AF_BLUE_STRING_SHAVIAN_BOTTOM,       0                                 }
    -    { AF_BLUE_STRING_SHAVIAN_DESCENDER,    0                                 }
    +    { AF_BLUE_STRING_SHAVIAN_TOP,          AF_BLUE_PROPERTY_LATIN_TOP          }
    +    { AF_BLUE_STRING_SHAVIAN_BOTTOM,       0                                   }
    +    { AF_BLUE_STRING_SHAVIAN_DESCENDER,    0                                   }
         { AF_BLUE_STRING_SHAVIAN_SMALL_TOP,    AF_BLUE_PROPERTY_LATIN_TOP      |
    -                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT   }
    -    { AF_BLUE_STRING_SHAVIAN_SMALL_BOTTOM, 0                                 }
    -    { AF_BLUE_STRING_MAX,                  0                                 }
    +                                           AF_BLUE_PROPERTY_LATIN_X_HEIGHT     }
    +    { AF_BLUE_STRING_SHAVIAN_SMALL_BOTTOM, AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM }
    +    { AF_BLUE_STRING_MAX,                  0                                   }
     
       AF_BLUE_STRINGSET_SINH
         { AF_BLUE_STRING_SINHALA_TOP,       AF_BLUE_PROPERTY_LATIN_TOP }
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.h b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.h
    index 2aa9d0984ef..5bb8406dc2b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.h
    @@ -7,7 +7,7 @@
      *
      *   Auto-fitter data for blue strings (specification).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -314,14 +314,17 @@ FT_BEGIN_HEADER
       /* Properties are specific to a writing system.  We assume that a given  */
       /* blue string can't be used in more than a single writing system, which */
       /* is a safe bet.                                                        */
    -#define AF_BLUE_PROPERTY_LATIN_TOP       ( 1U << 0 )  /* must have value 1 */
    +#define AF_BLUE_PROPERTY_LATIN_TOP       ( 1U << 0 )    /* must be value 1 */
     #define AF_BLUE_PROPERTY_LATIN_SUB_TOP   ( 1U << 1 )
     #define AF_BLUE_PROPERTY_LATIN_NEUTRAL   ( 1U << 2 )
     #define AF_BLUE_PROPERTY_LATIN_X_HEIGHT  ( 1U << 3 )
     #define AF_BLUE_PROPERTY_LATIN_LONG      ( 1U << 4 )
     
    -#define AF_BLUE_PROPERTY_CJK_TOP    ( 1U << 0 )       /* must have value 1 */
    -#define AF_BLUE_PROPERTY_CJK_HORIZ  ( 1U << 1 )       /* must have value 2 */
    +#define AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM  ( 1U << 5 )
    +#define AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM    ( 1U << 6 )
    +
    +#define AF_BLUE_PROPERTY_CJK_TOP    ( 1U << 0 )         /* must be value 1 */
    +#define AF_BLUE_PROPERTY_CJK_HORIZ  ( 1U << 1 )         /* must be value 2 */
     #define AF_BLUE_PROPERTY_CJK_RIGHT  AF_BLUE_PROPERTY_CJK_TOP
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.hin b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.hin
    index 38031505a85..dbac14548d5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afblue.hin
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afblue.hin
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter data for blue strings (specification).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -99,14 +99,17 @@ FT_BEGIN_HEADER
       /* Properties are specific to a writing system.  We assume that a given  */
       /* blue string can't be used in more than a single writing system, which */
       /* is a safe bet.                                                        */
    -#define AF_BLUE_PROPERTY_LATIN_TOP       ( 1U << 0 )  /* must have value 1 */
    +#define AF_BLUE_PROPERTY_LATIN_TOP       ( 1U << 0 )    /* must be value 1 */
     #define AF_BLUE_PROPERTY_LATIN_SUB_TOP   ( 1U << 1 )
     #define AF_BLUE_PROPERTY_LATIN_NEUTRAL   ( 1U << 2 )
     #define AF_BLUE_PROPERTY_LATIN_X_HEIGHT  ( 1U << 3 )
     #define AF_BLUE_PROPERTY_LATIN_LONG      ( 1U << 4 )
     
    -#define AF_BLUE_PROPERTY_CJK_TOP    ( 1U << 0 )       /* must have value 1 */
    -#define AF_BLUE_PROPERTY_CJK_HORIZ  ( 1U << 1 )       /* must have value 2 */
    +#define AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM  ( 1U << 5 )
    +#define AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM    ( 1U << 6 )
    +
    +#define AF_BLUE_PROPERTY_CJK_TOP    ( 1U << 0 )         /* must be value 1 */
    +#define AF_BLUE_PROPERTY_CJK_HORIZ  ( 1U << 1 )         /* must be value 2 */
     #define AF_BLUE_PROPERTY_CJK_RIGHT  AF_BLUE_PROPERTY_CJK_TOP
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.c b/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.c
    index 869b60487c2..7086601838c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter hinting routines for CJK writing system (body).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -90,12 +90,8 @@
     
           /* If HarfBuzz is not available, we need a pointer to a single */
           /* unsigned long value.                                        */
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -      void*     shaper_buf;
    -#else
           FT_ULong  shaper_buf_;
           void*     shaper_buf = &shaper_buf_;
    -#endif
     
           const char*  p;
     
    @@ -105,9 +101,8 @@
     
           p = script_class->standard_charstring;
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -      shaper_buf = af_shaper_buf_create( face );
    -#endif
    +      if ( ft_hb_enabled( metrics->root.globals ) )
    +        shaper_buf = af_shaper_buf_create( metrics->root.globals );
     
           /* We check a list of standard characters.  The first match wins. */
     
    @@ -144,7 +139,7 @@
               break;
           }
     
    -      af_shaper_buf_destroy( face, shaper_buf );
    +      af_shaper_buf_destroy( metrics->root.globals, shaper_buf );
     
           if ( !glyph_index )
             goto Exit;
    @@ -152,7 +147,7 @@
           if ( !glyph_index )
             goto Exit;
     
    -      FT_TRACE5(( "standard character: U+%04lX (glyph index %ld)\n",
    +      FT_TRACE5(( "standard character: U+%04lX (glyph index %lu)\n",
                       ch, glyph_index ));
     
           error = FT_Load_Glyph( face, glyph_index, FT_LOAD_NO_SCALE );
    @@ -297,12 +292,8 @@
     
         /* If HarfBuzz is not available, we need a pointer to a single */
         /* unsigned long value.                                        */
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    void*     shaper_buf;
    -#else
         FT_ULong  shaper_buf_;
         void*     shaper_buf = &shaper_buf_;
    -#endif
     
     
         /* we walk over the blue character strings as specified in the   */
    @@ -313,9 +304,8 @@
         FT_TRACE5(( "==========================\n" ));
         FT_TRACE5(( "\n" ));
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    shaper_buf = af_shaper_buf_create( face );
    -#endif
    +    if ( ft_hb_enabled( metrics->root.globals ) )
    +      shaper_buf = af_shaper_buf_create( metrics->root.globals );
     
         for ( ; bs->string != AF_BLUE_STRING_MAX; bs++ )
         {
    @@ -340,7 +330,7 @@
             };
     
     
    -        FT_TRACE5(( "blue zone %d (%s):\n",
    +        FT_TRACE5(( "blue zone %u (%s):\n",
                         axis->blue_count,
                         cjk_blue_name[AF_CJK_IS_HORIZ_BLUE( bs ) |
                                       AF_CJK_IS_TOP_BLUE( bs )   ] ));
    @@ -553,7 +543,7 @@
     
         } /* end for loop */
     
    -    af_shaper_buf_destroy( face, shaper_buf );
    +    af_shaper_buf_destroy( metrics->root.globals, shaper_buf );
     
         FT_TRACE5(( "\n" ));
     
    @@ -572,23 +562,20 @@
     
         /* If HarfBuzz is not available, we need a pointer to a single */
         /* unsigned long value.                                        */
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    void*     shaper_buf;
    -#else
         FT_ULong  shaper_buf_;
         void*     shaper_buf = &shaper_buf_;
    -#endif
     
         /* in all supported charmaps, digits have character codes 0x30-0x39 */
         const char   digits[] = "0 1 2 3 4 5 6 7 8 9";
         const char*  p;
     
    +    FT_UNUSED( face );
    +
     
         p = digits;
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    shaper_buf = af_shaper_buf_create( face );
    -#endif
    +    if ( ft_hb_enabled( metrics->root.globals ) )
    +      shaper_buf = af_shaper_buf_create( metrics->root.globals );
     
         while ( *p )
         {
    @@ -624,7 +611,7 @@
           }
         }
     
    -    af_shaper_buf_destroy( face, shaper_buf );
    +    af_shaper_buf_destroy( metrics->root.globals, shaper_buf );
     
         metrics->root.digits_have_same_width = same_width;
       }
    @@ -710,7 +697,7 @@
             FT_Pos  delta1, delta2;
     
     
    -        blue->ref.fit  = FT_PIX_ROUND( blue->ref.cur );
    +        blue->ref.fit = FT_PIX_ROUND( blue->ref.cur );
     
             /* shoot is under shoot for cjk */
             delta1 = FT_DivFix( blue->ref.fit, scale ) - blue->shoot.org;
    @@ -736,7 +723,7 @@
     
             blue->shoot.fit = blue->ref.fit - delta2;
     
    -        FT_TRACE5(( ">> active cjk blue zone %c%d[%ld/%ld]:\n",
    +        FT_TRACE5(( ">> active cjk blue zone %c%u[%ld/%ld]:\n",
                         ( dim == AF_DIMENSION_HORZ ) ? 'H' : 'V',
                         nn, blue->ref.org, blue->shoot.org ));
             FT_TRACE5(( "     ref:   cur=%.2f fit=%.2f\n",
    @@ -1378,7 +1365,7 @@
       }
     
     
    -  /* Initalize hinting engine. */
    +  /* Initialize hinting engine. */
     
       FT_LOCAL_DEF( FT_Error )
       af_cjk_hints_init( AF_GlyphHints    hints,
    @@ -2185,7 +2172,7 @@
       af_cjk_align_edge_points( AF_GlyphHints  hints,
                                 AF_Dimension   dim )
       {
    -    AF_AxisHints  axis       = & hints->axis[dim];
    +    AF_AxisHints  axis       = &hints->axis[dim];
         AF_Edge       edges      = axis->edges;
         AF_Edge       edge_limit = FT_OFFSET( edges, axis->num_edges );
         AF_Edge       edge;
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.h b/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.h
    index bc5aaf12e6e..bd1b39358e0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afcjk.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter hinting routines for CJK writing system (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afcover.h b/src/java.desktop/share/native/libfreetype/src/autofit/afcover.h
    index 7980cf2e979..b93bcd1a2c5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afcover.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afcover.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter coverages (specification only).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.c b/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.c
    index ad667d2edc7..8613544f913 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.c
    @@ -5,7 +5,7 @@
      *   Auto-fitter dummy routines to be used if no hinting should be
      *   performed (body).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.h b/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.h
    index 613c2f88a38..78a79439d95 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afdummy.h
    @@ -5,7 +5,7 @@
      *   Auto-fitter dummy routines to be used if no hinting should be
      *   performed (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/aferrors.h b/src/java.desktop/share/native/libfreetype/src/autofit/aferrors.h
    index ae584ff06db..f3093fc90df 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/aferrors.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/aferrors.h
    @@ -4,7 +4,7 @@
      *
      *   Autofitter error codes (specification only).
      *
    - * Copyright (C) 2005-2024 by
    + * Copyright (C) 2005-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.c b/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.c
    index b7403fa65e1..e74d8141161 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter routines to compute global hinting values (body).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -22,6 +22,11 @@
     #include "afws-decl.h"
     #include <freetype/internal/ftdebug.h>
     
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +#  include "afgsub.h"
    +#  include "ft-hb-ft.h"
    +#endif
    +
     
       /**************************************************************************
        *
    @@ -184,7 +189,7 @@
               if ( gindex != 0                                                &&
                    gindex < globals->glyph_count                              &&
                    ( gstyles[gindex] & AF_STYLE_MASK ) == AF_STYLE_UNASSIGNED )
    -            gstyles[gindex] = ss;
    +            gstyles[gindex] = ss | AF_HAS_CMAP_ENTRY;
     
               for (;;)
               {
    @@ -195,7 +200,7 @@
     
                 if ( gindex < globals->glyph_count                              &&
                      ( gstyles[gindex] & AF_STYLE_MASK ) == AF_STYLE_UNASSIGNED )
    -              gstyles[gindex] = ss;
    +              gstyles[gindex] = ss | AF_HAS_CMAP_ENTRY;
               }
             }
     
    @@ -301,7 +306,7 @@
               if ( !( count % 10 ) )
                 FT_TRACE4(( " " ));
     
    -          FT_TRACE4(( " %d", idx ));
    +          FT_TRACE4(( " %u", idx ));
               count++;
     
               if ( !( count % 10 ) )
    @@ -356,8 +361,21 @@
         globals->scale_down_factor         = 0;
     
     #ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    globals->hb_font = hb_ft_font_create_( face, NULL );
    -    globals->hb_buf  = hb_buffer_create();
    +    if ( ft_hb_enabled ( globals ) )
    +    {
    +      globals->hb_font = ft_hb_ft_font_create( globals );
    +      globals->hb_buf  = hb( buffer_create )();
    +
    +      af_parse_gsub( globals );
    +    }
    +    else
    +    {
    +      globals->hb_font = NULL;
    +      globals->hb_buf  = NULL;
    +
    +      globals->gsub                          = NULL;
    +      globals->gsub_lookups_single_alternate = NULL;
    +    }
     #endif
     
         error = af_face_globals_compute_style_coverage( globals );
    @@ -405,8 +423,14 @@
           }
     
     #ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -      hb_font_destroy( globals->hb_font );
    -      hb_buffer_destroy( globals->hb_buf );
    +      if ( ft_hb_enabled ( globals ) )
    +      {
    +        hb( font_destroy )( globals->hb_font );
    +        hb( buffer_destroy )( globals->hb_buf );
    +
    +        FT_FREE( globals->gsub );
    +        FT_FREE( globals->gsub_lookups_single_alternate );
    +      }
     #endif
     
           /* no need to free `globals->glyph_styles'; */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.h b/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.h
    index ddb54c89b27..dc061159492 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afglobal.h
    @@ -5,7 +5,7 @@
      *   Auto-fitter routines to compute global hinting values
      *   (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -73,15 +73,17 @@ FT_BEGIN_HEADER
       /* default script for OpenType; ignored if HarfBuzz isn't used */
     #define AF_SCRIPT_DEFAULT    AF_SCRIPT_LATN
     
    -  /* a bit mask for AF_DIGIT and AF_NONBASE */
    -#define AF_STYLE_MASK        0x3FFF
    +  /* a bit mask for AF_DIGIT, AF_NONBASE, and AF_HAS_CMAP_ENTRY */
    +#define AF_STYLE_MASK        0x1FFF
       /* an uncovered glyph      */
     #define AF_STYLE_UNASSIGNED  AF_STYLE_MASK
     
    -  /* if this flag is set, we have an ASCII digit   */
    +  /* if this flag is set, we have an ASCII digit */
     #define AF_DIGIT             0x8000U
       /* if this flag is set, we have a non-base character */
     #define AF_NONBASE           0x4000U
    +  /* if this flag is set, the glyph has a (direct) cmap entry */
    +#define AF_HAS_CMAP_ENTRY    0x2000U
     
       /* `increase-x-height' property */
     #define AF_PROP_INCREASE_X_HEIGHT_MIN  6
    @@ -111,6 +113,13 @@ FT_BEGIN_HEADER
     #ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
         hb_font_t*       hb_font;
         hb_buffer_t*     hb_buf;           /* for feature comparison */
    +
    +    /* The GSUB table. */
    +    FT_Byte*         gsub;
    +    /* An array of lookup offsets (of `gsub_lookup_count` elements), */
    +    /* with only SingleSubst and AlternateSubst lookups non-NULL.    */
    +    FT_UShort        gsub_lookup_count;
    +    FT_UInt32*       gsub_lookups_single_alternate;
     #endif
     
         /* per-face auto-hinter properties */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afhints.c b/src/java.desktop/share/native/libfreetype/src/autofit/afhints.c
    index 96ffe343aa4..11faa655f62 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afhints.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afhints.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter hinting routines (body).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -840,6 +840,10 @@
     
         if ( hints->contours != hints->embedded.contours )
           FT_FREE( hints->contours );
    +    if ( hints->contour_y_minima != hints->embedded.contour_y_minima )
    +      FT_FREE( hints->contour_y_minima );
    +    if ( hints->contour_y_maxima != hints->embedded.contour_y_maxima )
    +      FT_FREE( hints->contour_y_maxima );
         hints->max_contours = 0;
         hints->num_contours = 0;
     
    @@ -896,19 +900,30 @@
         {
           if ( !hints->contours )
           {
    -        hints->contours     = hints->embedded.contours;
    +        hints->contours         = hints->embedded.contours;
    +        hints->contour_y_minima = hints->embedded.contour_y_minima;
    +        hints->contour_y_maxima = hints->embedded.contour_y_maxima;
    +
             hints->max_contours = AF_CONTOURS_EMBEDDED;
           }
         }
         else if ( new_max > old_max )
         {
           if ( hints->contours == hints->embedded.contours )
    -        hints->contours = NULL;
    +      {
    +        hints->contours         = NULL;
    +        hints->contour_y_minima = NULL;
    +        hints->contour_y_maxima = NULL;
    +      }
     
           new_max = ( new_max + 3 ) & ~3; /* round up to a multiple of 4 */
     
           if ( FT_RENEW_ARRAY( hints->contours, old_max, new_max ) )
             goto Exit;
    +      if ( FT_RENEW_ARRAY( hints->contour_y_minima, old_max, new_max ) )
    +        goto Exit;
    +      if ( FT_RENEW_ARRAY( hints->contour_y_maxima, old_max, new_max ) )
    +        goto Exit;
     
           hints->max_contours = new_max;
         }
    @@ -1324,7 +1339,7 @@
       af_glyph_hints_align_edge_points( AF_GlyphHints  hints,
                                         AF_Dimension   dim )
       {
    -    AF_AxisHints  axis          = & hints->axis[dim];
    +    AF_AxisHints  axis          = &hints->axis[dim];
         AF_Segment    segments      = axis->segments;
         AF_Segment    segment_limit = FT_OFFSET( segments, axis->num_segments );
         AF_Segment    seg;
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afhints.h b/src/java.desktop/share/native/libfreetype/src/autofit/afhints.h
    index 76fe83006a5..46b3ed3366f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afhints.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afhints.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter hinting routines (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -222,6 +222,9 @@ FT_BEGIN_HEADER
       /* the distance to the next point is very small */
     #define AF_FLAG_NEAR  ( 1U << 5 )
     
    +  /* prevent the auto-hinter from adding such a point to a segment */
    +#define AF_FLAG_IGNORE  ( 1U << 6 )
    +
     
       /* edge hint flags */
     #define AF_EDGE_NORMAL  0
    @@ -229,6 +232,7 @@ FT_BEGIN_HEADER
     #define AF_EDGE_SERIF    ( 1U << 1 )
     #define AF_EDGE_DONE     ( 1U << 2 )
     #define AF_EDGE_NEUTRAL  ( 1U << 3 ) /* edge aligns to a neutral blue zone */
    +#define AF_EDGE_NO_BLUE  ( 1U << 4 ) /* do not align edge to blue zone     */
     
     
       typedef struct AF_PointRec_*    AF_Point;
    @@ -303,6 +307,7 @@ FT_BEGIN_HEADER
     
       } AF_EdgeRec;
     
    +
     #define AF_SEGMENTS_EMBEDDED  18   /* number of embedded segments   */
     #define AF_EDGES_EMBEDDED     12   /* number of embedded edges      */
     
    @@ -346,9 +351,11 @@ FT_BEGIN_HEADER
         FT_Int           num_points;    /* number of used points      */
         AF_Point         points;        /* points array               */
     
    -    FT_Int           max_contours;  /* number of allocated contours */
    -    FT_Int           num_contours;  /* number of used contours      */
    -    AF_Point*        contours;      /* contours array               */
    +    FT_Int           max_contours;     /* number of allocated contours    */
    +    FT_Int           num_contours;     /* number of used contours         */
    +    AF_Point*        contours;         /* contours array                  */
    +    FT_Pos*          contour_y_minima; /* array with y maxima of contours */
    +    FT_Pos*          contour_y_maxima; /* array with y minima of contours */
     
         AF_AxisHintsRec  axis[AF_DIMENSION_MAX];
     
    @@ -357,11 +364,13 @@ FT_BEGIN_HEADER
                                         /* implementations         */
         AF_StyleMetrics  metrics;
     
    -    /* Two arrays to avoid allocation penalty.            */
    +    /* Some arrays to avoid allocation penalty.           */
         /* The `embedded' structure must be the last element! */
         struct
         {
           AF_Point       contours[AF_CONTOURS_EMBEDDED];
    +      FT_Pos         contour_y_minima[AF_CONTOURS_EMBEDDED];
    +      FT_Pos         contour_y_maxima[AF_CONTOURS_EMBEDDED];
           AF_PointRec    points[AF_POINTS_EMBEDDED];
         } embedded;
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afindic.c b/src/java.desktop/share/native/libfreetype/src/autofit/afindic.c
    index c6d23efd86f..a2cd14f8817 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afindic.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afindic.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter hinting routines for Indic writing system (body).
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * Rahul Bhalerao , .
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afindic.h b/src/java.desktop/share/native/libfreetype/src/autofit/afindic.h
    index a7f73f25153..a2e825e9f86 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afindic.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afindic.h
    @@ -5,7 +5,7 @@
      *   Auto-fitter hinting routines for Indic writing system
      *   (specification).
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * Rahul Bhalerao , .
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.c b/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.c
    index 89287f7ea5a..4a42d919474 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter hinting routines for latin writing system (body).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -22,6 +22,7 @@
     #include "afglobal.h"
     #include "aflatin.h"
     #include "aferrors.h"
    +#include "afadjust.h"
     
     
       /**************************************************************************
    @@ -81,12 +82,8 @@
     
           /* If HarfBuzz is not available, we need a pointer to a single */
           /* unsigned long value.                                        */
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -      void*     shaper_buf;
    -#else
           FT_ULong  shaper_buf_;
           void*     shaper_buf = &shaper_buf_;
    -#endif
     
           const char*  p;
     
    @@ -97,9 +94,9 @@
     
           p = script_class->standard_charstring;
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -      shaper_buf = af_shaper_buf_create( face );
    -#endif
    +      if ( ft_hb_enabled ( metrics->root.globals ) )
    +        shaper_buf = af_shaper_buf_create( metrics->root.globals );
    +
           /*
            * We check a list of standard characters to catch features like
            * `c2sc' (small caps from caps) that don't contain lowercase letters
    @@ -140,7 +137,7 @@
               break;
           }
     
    -      af_shaper_buf_destroy( face, shaper_buf );
    +      af_shaper_buf_destroy( metrics->root.globals, shaper_buf );
     
           if ( !glyph_index )
           {
    @@ -149,7 +146,7 @@
             goto Exit;
           }
     
    -      FT_TRACE5(( "standard character: U+%04lX (glyph index %ld)\n",
    +      FT_TRACE5(( "standard character: U+%04lX (glyph index %lu)\n",
                       ch, glyph_index ));
     
           error = FT_Load_Glyph( face, glyph_index, FT_LOAD_NO_SCALE );
    @@ -334,12 +331,8 @@
     
         /* If HarfBuzz is not available, we need a pointer to a single */
         /* unsigned long value.                                        */
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    void*     shaper_buf;
    -#else
         FT_ULong  shaper_buf_;
         void*     shaper_buf = &shaper_buf_;
    -#endif
     
     
         /* we walk over the blue character strings as specified in the */
    @@ -349,9 +342,8 @@
         FT_TRACE5(( "============================\n" ));
         FT_TRACE5(( "\n" ));
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    shaper_buf = af_shaper_buf_create( face );
    -#endif
    +    if ( ft_hb_enabled ( metrics->root.globals ) )
    +      shaper_buf = af_shaper_buf_create( metrics->root.globals );
     
         for ( ; bs->string != AF_BLUE_STRING_MAX; bs++ )
         {
    @@ -367,7 +359,7 @@
             FT_Bool  have_flag = 0;
     
     
    -        FT_TRACE5(( "blue zone %d", axis->blue_count ));
    +        FT_TRACE5(( "blue zone %u", axis->blue_count ));
     
             if ( bs->properties )
             {
    @@ -407,6 +399,20 @@
                 FT_TRACE5(( "long" ));
               }
     
    +          if ( AF_LATIN_IS_CAPITAL_BOTTOM_BLUE( bs ) )
    +          {
    +            if ( have_flag )
    +              FT_TRACE5(( ", " ));
    +            FT_TRACE5(( "capital bottom" ));
    +          }
    +
    +          if ( AF_LATIN_IS_SMALL_BOTTOM_BLUE( bs ) )
    +          {
    +            if ( have_flag )
    +              FT_TRACE5(( ", " ));
    +            FT_TRACE5(( "small bottom" ));
    +          }
    +
               FT_TRACE5(( ")" ));
             }
     
    @@ -454,9 +460,9 @@
             }
     
             if ( AF_LATIN_IS_TOP_BLUE( bs ) )
    -          best_y_extremum = FT_INT_MIN;
    +          best_y_extremum = FT_LONG_MIN;
             else
    -          best_y_extremum = FT_INT_MAX;
    +          best_y_extremum = FT_LONG_MAX;
     
             /* iterate over all glyph elements of the character cluster */
             /* and get the data of the `biggest' one                    */
    @@ -487,7 +493,7 @@
                 if ( num_idx == 1 )
                   FT_TRACE5(( "  U+%04lX contains no (usable) outlines\n", ch ));
                 else
    -              FT_TRACE5(( "  component %d of cluster starting with U+%04lX"
    +              FT_TRACE5(( "  component %u of cluster starting with U+%04lX"
                               " contains no (usable) outlines\n", i, ch ));
     #endif
                 continue;
    @@ -825,7 +831,7 @@
                 if ( num_idx == 1 )
                   FT_TRACE5(( "  U+%04lX: best_y = %5ld", ch, best_y ));
                 else
    -              FT_TRACE5(( "  component %d of cluster starting with U+%04lX:"
    +              FT_TRACE5(( "  component %u of cluster starting with U+%04lX:"
                               " best_y = %5ld", i, ch, best_y ));
     #endif
     
    @@ -879,8 +885,8 @@
     
             } /* end for loop */
     
    -        if ( !( best_y_extremum == FT_INT_MIN ||
    -                best_y_extremum == FT_INT_MAX ) )
    +        if ( !( best_y_extremum == FT_LONG_MIN ||
    +                best_y_extremum == FT_LONG_MAX ) )
             {
               if ( best_round )
                 rounds[num_rounds++] = best_y_extremum;
    @@ -959,6 +965,10 @@
             blue->flags |= AF_LATIN_BLUE_SUB_TOP;
           if ( AF_LATIN_IS_NEUTRAL_BLUE( bs ) )
             blue->flags |= AF_LATIN_BLUE_NEUTRAL;
    +      if ( AF_LATIN_IS_CAPITAL_BOTTOM_BLUE( bs ) )
    +        blue->flags |= AF_LATIN_BLUE_BOTTOM;
    +      if ( AF_LATIN_IS_SMALL_BOTTOM_BLUE( bs ) )
    +        blue->flags |= AF_LATIN_BLUE_BOTTOM_SMALL;
     
           /*
            * The following flag is used later to adjust the y and x scales
    @@ -973,7 +983,7 @@
     
         } /* end for loop */
     
    -    af_shaper_buf_destroy( face, shaper_buf );
    +    af_shaper_buf_destroy( metrics->root.globals, shaper_buf );
     
         if ( axis->blue_count )
         {
    @@ -1070,23 +1080,20 @@
     
         /* If HarfBuzz is not available, we need a pointer to a single */
         /* unsigned long value.                                        */
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    void*     shaper_buf;
    -#else
         FT_ULong  shaper_buf_;
         void*     shaper_buf = &shaper_buf_;
    -#endif
     
         /* in all supported charmaps, digits have character codes 0x30-0x39 */
         const char   digits[] = "0 1 2 3 4 5 6 7 8 9";
         const char*  p;
     
    +    FT_UNUSED( face );
    +
     
         p = digits;
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -    shaper_buf = af_shaper_buf_create( face );
    -#endif
    +    if ( ft_hb_enabled ( metrics->root.globals ) )
    +      shaper_buf = af_shaper_buf_create( metrics->root.globals );
     
         while ( *p )
         {
    @@ -1122,7 +1129,7 @@
           }
         }
     
    -    af_shaper_buf_destroy( face, shaper_buf );
    +    af_shaper_buf_destroy( metrics->root.globals, shaper_buf );
     
         metrics->root.digits_have_same_width = same_width;
       }
    @@ -1155,6 +1162,9 @@
           af_latin_metrics_check_digits( metrics, face );
         }
     
    +    af_reverse_character_map_new( &metrics->root.reverse_charmap,
    +                                  &metrics->root );
    +
       Exit:
         face->charmap = oldmap;
         return error;
    @@ -1263,7 +1273,7 @@
                   max_height = FT_MAX( max_height, -Axis->blues[nn].descender );
                 }
     
    -            dist  = FT_MulFix( max_height, new_scale - scale );
    +            dist = FT_MulFix( max_height, new_scale - scale );
     
                 if ( -128 < dist && dist < 128 )
                 {
    @@ -1466,13 +1476,13 @@
             AF_LatinBlue  blue = &axis->blues[nn];
     
     
    -        FT_TRACE5(( "  reference %d: %ld scaled to %.2f%s\n",
    +        FT_TRACE5(( "  reference %u: %ld scaled to %.2f%s\n",
                         nn,
                         blue->ref.org,
                         (double)blue->ref.fit / 64,
                         ( blue->flags & AF_LATIN_BLUE_ACTIVE ) ? ""
                                                                : " (inactive)" ));
    -        FT_TRACE5(( "  overshoot %d: %ld scaled to %.2f%s\n",
    +        FT_TRACE5(( "  overshoot %u: %ld scaled to %.2f%s\n",
                         nn,
                         blue->shoot.org,
                         (double)blue->shoot.fit / 64,
    @@ -1484,6 +1494,17 @@
       }
     
     
    +  FT_CALLBACK_DEF( void )
    +  af_latin_metrics_done( AF_StyleMetrics  metrics_ )
    +  {
    +    AF_LatinMetrics  metrics = (AF_LatinMetrics)metrics_;
    +
    +
    +    af_reverse_character_map_done( metrics->root.reverse_charmap,
    +                                   metrics->root.globals->face->memory );
    +  }
    +
    +
       /* Scale global values in both directions. */
     
       FT_LOCAL_DEF( void )
    @@ -1617,7 +1638,8 @@
           FT_Pos     prev_max_on_coord = max_on_coord;
     
     
    -      if ( FT_ABS( last->out_dir )  == major_dir &&
    +      if ( !( point->flags & AF_FLAG_IGNORE )    &&
    +           FT_ABS( last->out_dir )  == major_dir &&
                FT_ABS( point->out_dir ) == major_dir )
           {
             /* we are already on an edge, try to locate its start */
    @@ -1676,13 +1698,17 @@
                   max_on_coord = v;
               }
     
    -          if ( point->out_dir != segment_dir || point == last )
    +          if ( point->flags & AF_FLAG_IGNORE ||
    +               point->out_dir != segment_dir ||
    +               point == last )
               {
                 /* check whether the new segment's start point is identical to */
                 /* the previous segment's end point; for example, this might   */
                 /* happen for spikes                                           */
     
    -            if ( !prev_segment || segment->first != prev_segment->last )
    +            if ( point->flags & AF_FLAG_IGNORE        ||
    +                 !prev_segment                        ||
    +                 segment->first != prev_segment->last )
                 {
                   /* points are different: we are just leaving an edge, thus */
                   /* record a new segment                                    */
    @@ -1842,7 +1868,8 @@
             /* if we are not on an edge, check whether the major direction */
             /* coincides with the current point's `out' direction, or      */
             /* whether we have a single-point contour                      */
    -        if ( !on_edge                                  &&
    +        if ( !( point->flags & AF_FLAG_IGNORE )        &&
    +             !on_edge                                  &&
                  ( FT_ABS( point->out_dir ) == major_dir ||
                    point == point->prev                  ) )
             {
    @@ -2521,6 +2548,9 @@
           FT_Pos    best_dist;                 /* initial threshold */
     
     
    +      if ( edge->flags & AF_EDGE_NO_BLUE )
    +        continue;
    +
           /* compute the initial threshold as a fraction of the EM size */
           /* (the value 40 is heuristic)                                */
           best_dist = FT_MulFix( metrics->units_per_em / 40, scale );
    @@ -2610,7 +2640,7 @@
       }
     
     
    -  /* Initalize hinting engine. */
    +  /* Initialize hinting engine. */
     
       static FT_Error
       af_latin_hints_init( AF_GlyphHints    hints,
    @@ -2737,6 +2767,1198 @@
       }
     
     
    +#undef  FT_COMPONENT
    +#define FT_COMPONENT  afadjust
    +
    +
    +  static void
    +  af_move_contour_vertically( AF_Point  contour,
    +                              FT_Int    movement )
    +  {
    +    AF_Point  point       = contour;
    +    AF_Point  first_point = point;
    +
    +
    +    if ( point )
    +    {
    +      do
    +      {
    +        point->y += movement;
    +        point     = point->next;
    +
    +      } while ( point != first_point );
    +    }
    +  }
    +
    +
    +  /* Move all contours higher than `limit` by `delta`. */
    +  static void
    +  af_move_contours_up( AF_GlyphHints  hints,
    +                       FT_Pos         limit,
    +                       FT_Pos         delta )
    +  {
    +    FT_Int  contour;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  min_y = hints->contour_y_minima[contour];
    +      FT_Pos  max_y = hints->contour_y_maxima[contour];
    +
    +
    +      if ( min_y < max_y &&
    +           min_y > limit )
    +        af_move_contour_vertically( hints->contours[contour],
    +                                    delta );
    +    }
    +  }
    +
    +
    +  static void
    +  af_move_contours_down( AF_GlyphHints  hints,
    +                         FT_Pos         limit,
    +                         FT_Pos         delta )
    +  {
    +    FT_Int  contour;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  min_y = hints->contour_y_minima[contour];
    +      FT_Pos  max_y = hints->contour_y_maxima[contour];
    +
    +
    +      if ( min_y < max_y &&
    +           max_y < limit )
    +        af_move_contour_vertically( hints->contours[contour],
    +                                    -delta );
    +    }
    +  }
    +
    +
    +  /* Compute vertical extrema of all contours and store them in the */
    +  /* `contour_y_minima` and `contour_y_maxima` arrays of `hints`.   */
    +  static void
    +  af_compute_vertical_extrema( AF_GlyphHints  hints )
    +  {
    +    FT_Int  contour;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  min_y = FT_LONG_MAX;
    +      FT_Pos  max_y = FT_LONG_MIN;
    +
    +      AF_Point  first_point = hints->contours[contour];
    +      AF_Point  point       = first_point;
    +
    +
    +      if ( !first_point || first_point->next->next == first_point )
    +        goto End_loop;
    +
    +      do
    +      {
    +        if ( point->y < min_y )
    +          min_y = point->y;
    +        if ( point->y > max_y )
    +          max_y = point->y;
    +
    +        point = point->next;
    +
    +      } while ( point != first_point );
    +
    +    End_loop:
    +      hints->contour_y_minima[contour] = min_y;
    +      hints->contour_y_maxima[contour] = max_y;
    +    }
    +  }
    +
    +
    +  static FT_Int
    +  af_find_highest_contour( AF_GlyphHints  hints )
    +  {
    +    FT_Int  highest_contour = 0;
    +    FT_Pos  highest_min_y   = FT_LONG_MAX;
    +    FT_Pos  highest_max_y   = FT_LONG_MIN;
    +
    +    FT_Int  contour;
    +
    +
    +    /* At this point we have one 'lower' (usually the base glyph)   */
    +    /* and one 'upper' object (usually the diacritic glyph).  If    */
    +    /* there are more contours, they must be enclosed within either */
    +    /* 'lower' or 'upper'.  To find this enclosing 'upper' contour  */
    +    /* it is thus sufficient to search for the contour with the     */
    +    /* highest y maximum value.                                     */
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  current_min_y = hints->contour_y_minima[contour];
    +      FT_Pos  current_max_y = hints->contour_y_maxima[contour];
    +
    +
    +      /* If we have two contours with the same maximum value, take */
    +      /* the one that has a smaller height.                        */
    +      if ( current_max_y > highest_max_y      ||
    +           ( current_max_y == highest_max_y &&
    +             current_min_y > highest_min_y  ) )
    +      {
    +        highest_min_y   = current_min_y;
    +        highest_max_y   = current_max_y;
    +        highest_contour = contour;
    +      }
    +    }
    +
    +    return highest_contour;
    +  }
    +
    +
    +  static FT_Int
    +  af_find_second_highest_contour( AF_GlyphHints  hints )
    +  {
    +    FT_Int  highest_contour;
    +    FT_Pos  highest_min_y;
    +
    +    FT_Int  second_highest_contour = 0;
    +    FT_Pos  second_highest_max_y   = FT_LONG_MIN;
    +
    +    FT_Int  contour;
    +
    +
    +    if ( hints->num_contours < 3 )
    +      return 0;
    +
    +    highest_contour = af_find_highest_contour( hints );
    +    highest_min_y   = hints->contour_y_minima[highest_contour];
    +
    +    /* Search the contour with the largest vertical maximum that has a */
    +    /* vertical minimum lower than the vertical minimum of the topmost */
    +    /* contour.                                                        */
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  current_min_y;
    +      FT_Pos  current_max_y;
    +
    +
    +      if ( contour == highest_contour )
    +        continue;
    +
    +      current_min_y = hints->contour_y_minima[contour];
    +      current_max_y = hints->contour_y_maxima[contour];
    +
    +      if ( current_max_y > second_highest_max_y &&
    +           current_min_y < highest_min_y        )
    +      {
    +        second_highest_max_y   = current_max_y;
    +        second_highest_contour = contour;
    +      }
    +    }
    +
    +    return second_highest_contour;
    +  }
    +
    +
    +  static FT_Int
    +  af_find_lowest_contour( AF_GlyphHints  hints )
    +  {
    +    FT_Int  lowest_contour = 0;
    +    FT_Pos  lowest_min_y   = FT_LONG_MAX;
    +    FT_Pos  lowest_max_y   = FT_LONG_MIN;
    +
    +    FT_Int  contour;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  current_min_y = hints->contour_y_minima[contour];
    +      FT_Pos  current_max_y = hints->contour_y_maxima[contour];
    +
    +
    +      if ( current_min_y < lowest_min_y      ||
    +           ( current_min_y == lowest_min_y &&
    +             current_max_y < lowest_max_y  ) )
    +      {
    +        lowest_min_y   = current_min_y;
    +        lowest_max_y   = current_max_y;
    +        lowest_contour = contour;
    +      }
    +    }
    +
    +    return lowest_contour;
    +  }
    +
    +
    +  static FT_Int
    +  af_find_second_lowest_contour( AF_GlyphHints  hints )
    +  {
    +    FT_Int  lowest_contour;
    +    FT_Pos  lowest_max_y;
    +
    +    FT_Int  second_lowest_contour = 0;
    +    FT_Pos  second_lowest_min_y   = FT_LONG_MAX;
    +
    +    FT_Int  contour;
    +
    +
    +    if ( hints->num_contours < 3 )
    +      return 0;
    +
    +    lowest_contour = af_find_lowest_contour( hints );
    +    lowest_max_y   = hints->contour_y_maxima[lowest_contour];
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  current_min_y;
    +      FT_Pos  current_max_y;
    +
    +
    +      if ( contour == lowest_contour )
    +        continue;
    +
    +      current_min_y = hints->contour_y_minima[contour];
    +      current_max_y = hints->contour_y_maxima[contour];
    +
    +      if ( current_min_y < second_lowest_min_y &&
    +           current_max_y > lowest_max_y        )
    +      {
    +        second_lowest_min_y   = current_min_y;
    +        second_lowest_contour = contour;
    +      }
    +    }
    +
    +    return second_lowest_contour;
    +  }
    +
    +
    +  /* While aligning edges to blue zones, make the auto-hinter */
    +  /* ignore the ones that are higher than `pos`.              */
    +  static void
    +  af_prevent_top_blue_alignment( AF_GlyphHints  hints,
    +                                 FT_Pos         pos )
    +  {
    +    AF_AxisHints  axis = &hints->axis[AF_DIMENSION_VERT];
    +
    +    AF_Edge  edges      = axis->edges;
    +    AF_Edge  edge_limit = FT_OFFSET( edges, axis->num_edges );
    +    AF_Edge  edge;
    +
    +
    +    for ( edge = edges; edge < edge_limit; edge++ )
    +      if ( edge->pos > pos )
    +        edge->flags |= AF_EDGE_NO_BLUE;
    +  }
    +
    +
    +  static void
    +  af_prevent_bottom_blue_alignment( AF_GlyphHints  hints,
    +                                    FT_Pos         pos )
    +  {
    +    AF_AxisHints  axis = &hints->axis[AF_DIMENSION_VERT];
    +
    +    AF_Edge  edges      = axis->edges;
    +    AF_Edge  edge_limit = FT_OFFSET( edges, axis->num_edges );
    +    AF_Edge  edge;
    +
    +
    +    for ( edge = edges; edge < edge_limit; edge++ )
    +      if ( edge->pos < pos )
    +        edge->flags |= AF_EDGE_NO_BLUE;
    +  }
    +
    +
    +  static void
    +  af_latin_get_base_glyph_blues( AF_GlyphHints  hints,
    +                                 FT_Bool        is_capital,
    +                                 AF_LatinBlue*  top,
    +                                 AF_LatinBlue*  bottom )
    +  {
    +    AF_LatinMetrics  metrics = (AF_LatinMetrics)hints->metrics;
    +    AF_LatinAxis     axis    = &metrics->axis[AF_DIMENSION_VERT];
    +
    +    FT_UInt  top_flag;
    +    FT_UInt  bottom_flag;
    +
    +    FT_UInt  i;
    +
    +
    +    top_flag  = is_capital ? AF_LATIN_BLUE_TOP
    +                           : AF_LATIN_BLUE_ADJUSTMENT;
    +    top_flag |= AF_LATIN_BLUE_ACTIVE;
    +
    +    for ( i = 0; i < axis->blue_count; i++ )
    +      if ( ( axis->blues[i].flags & top_flag ) == top_flag )
    +        break;
    +    if ( i < axis->blue_count )
    +      *top = &axis->blues[i];
    +
    +    bottom_flag  = is_capital ? AF_LATIN_BLUE_BOTTOM
    +                              : AF_LATIN_BLUE_BOTTOM_SMALL;
    +    bottom_flag |= AF_LATIN_BLUE_ACTIVE;
    +
    +    for ( i = 0; i < axis->blue_count; i++ )
    +      if ( ( axis->blues[i].flags & bottom_flag ) == bottom_flag )
    +        break;
    +    if ( i < axis->blue_count )
    +      *bottom = &axis->blues[i];
    +  }
    +
    +
    +  /* Make the auto-hinter ignore top blue zones while aligning edges. */
    +  /* This affects everything that is higher than a vertical position  */
    +  /* based on the lowercase or uppercase top and bottom blue zones    */
    +  /* (depending on `is_capital`).                                     */
    +  static void
    +  af_latin_ignore_top( AF_GlyphHints  hints,
    +                       AF_LatinBlue   top_blue,
    +                       AF_LatinBlue   bottom_blue )
    +  {
    +    FT_Pos  base_glyph_height;
    +    FT_Pos  limit;
    +
    +
    +    /* Ignore blue zones that are higher than a heuristic threshold     */
    +    /* (value 7 corresponds to approx. 14%, which should be sufficient  */
    +    /* to exceed the height of uppercase serifs.  We also add a quarter */
     +    /* of a pixel as a safety measure.)                                 */
    +    base_glyph_height = top_blue->shoot.cur - bottom_blue->shoot.cur;
    +    limit             = top_blue->shoot.cur + base_glyph_height / 7 + 16;
    +
    +    af_prevent_top_blue_alignment( hints, limit );
    +  }
    +
    +
    +  static void
    +  af_latin_ignore_bottom( AF_GlyphHints  hints,
    +                          AF_LatinBlue   top_blue,
    +                          AF_LatinBlue   bottom_blue )
    +  {
    +    FT_Pos  base_glyph_height;
    +    FT_Pos  limit;
    +
    +
    +    base_glyph_height = top_blue->shoot.cur - bottom_blue->shoot.cur;
    +    limit             = bottom_blue->shoot.cur - base_glyph_height / 7 - 16;
    +
    +    af_prevent_bottom_blue_alignment( hints, limit );
    +  }
    +
    +
    +  static void
    +  af_touch_contour( AF_GlyphHints  hints,
    +                    FT_Int         contour )
    +  {
    +    AF_Point  first_point = hints->contours[contour];
    +    AF_Point  p           = first_point;
    +
    +
    +    do
    +    {
    +      p = p->next;
    +
    +      p->flags |= AF_FLAG_IGNORE;
    +      if ( !( p->flags & AF_FLAG_CONTROL ) )
    +        p->flags |= AF_FLAG_TOUCH_Y;
    +
    +    } while ( p != first_point );
    +  }
    +
    +
    +  static void
    +  af_touch_top_contours( AF_GlyphHints  hints,
    +                         FT_Int         limit_contour )
    +  {
    +    FT_Pos  limit = hints->contour_y_minima[limit_contour];
    +
    +    FT_Int  contour;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  min_y = hints->contour_y_minima[contour];
    +      FT_Pos  max_y = hints->contour_y_maxima[contour];
    +
    +
    +      if ( min_y < max_y  &&
    +           min_y >= limit )
    +        af_touch_contour( hints, contour );
    +    }
    +  }
    +
    +
    +  static void
    +  af_touch_bottom_contours( AF_GlyphHints  hints,
    +                            FT_Int         limit_contour )
    +  {
    +    FT_Pos  limit = hints->contour_y_minima[limit_contour];
    +
    +    FT_Int  contour;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      FT_Pos  min_y = hints->contour_y_minima[contour];
    +      FT_Pos  max_y = hints->contour_y_maxima[contour];
    +
    +
    +      if ( min_y < max_y  &&
    +           max_y <= limit )
    +        af_touch_contour( hints, contour );
    +    }
    +  }
    +
    +
    +  /* Stretch tilde vertically, if necessary, and return the height */
    +  /* difference between the original and the stretched outline.    */
    +  static FT_Pos
    +  af_latin_stretch_top_tilde( AF_GlyphHints  hints,
    +                              FT_Int         tilde_contour )
    +  {
    +    AF_Point  p           = hints->contours[tilde_contour];
    +    AF_Point  first_point = p;
    +
    +    FT_Pos  min_y = hints->contour_y_minima[tilde_contour];
    +    FT_Pos  max_y = hints->contour_y_maxima[tilde_contour];
    +
    +    FT_Pos   min_measurement   = FT_LONG_MAX;
    +    FT_Bool  measurement_taken = FALSE;
    +
    +    FT_Pos  height;
    +    FT_Pos  extremum_threshold;
    +    FT_Pos  target_height;
    +
    +
    +    if ( min_y == max_y )
    +      return 0;
    +
    +    FT_TRACE4(( "af_latin_stretch_top_tilde: min y: %ld, max y: %ld\n",
    +                min_y, max_y ));
    +
    +    height             = SUB_LONG( max_y, min_y );
    +    extremum_threshold = height / 8;    /* Value 8 is heuristic. */
    +
    +    /* Find points that are local vertical round extrema, and which   */
    +    /* do not coincide with the vertical extreme values (i.e., we     */
    +    /* search for the 'other' wiggles in the tilde), then measure the */
    +    /* distance to the vertical extreme values.  Try to find the one  */
    +    /* with the smallest distance.                                    */
    +    /*                                                                */
    +    /* The algorithm only works for tilde shapes that don't deviate   */
    +    /* from the standard shape too much.  In particular, the wiggles  */
    +    /* must be round extrema.                                         */
    +    do
    +    {
    +      p = p->next;
    +
    +      if ( !( p->flags & AF_FLAG_CONTROL )          &&
    +           p->prev->y == p->y && p->next->y == p->y &&
    +           p->y != min_y && p->y != max_y           &&
    +           p->prev->flags & AF_FLAG_CONTROL         &&
    +           p->next->flags & AF_FLAG_CONTROL         )
    +      {
    +        /* This point could be a candidate.  Find the next and previous */
    +        /* on-curve points, and make sure they are both either above or */
    +        /* below the point, then make the measurement.                  */
    +        AF_Point  prev_on = p->prev;
    +        AF_Point  next_on = p->next;
    +
    +        FT_Pos  measurement;
    +
    +
    +        while ( prev_on->flags & AF_FLAG_CONTROL )
    +          prev_on = prev_on->prev;
    +        while ( next_on->flags & AF_FLAG_CONTROL )
    +          next_on = next_on->next;
    +
    +        if ( next_on->y > p->y && prev_on->y > p->y )
    +          measurement = SUB_LONG( p->y, min_y );
    +        else if ( next_on->y < p->y && prev_on->y < p->y )
    +          measurement = SUB_LONG( max_y, p->y );
    +        else
    +          continue;
    +
    +        /* Ignore hits that are too near to a vertical extremum. */
    +        if ( measurement < extremum_threshold )
    +          continue;
    +
    +        if ( !measurement_taken || measurement < min_measurement )
    +        {
    +          measurement_taken = TRUE;
    +          min_measurement   = measurement;
    +        }
    +      }
    +
    +    } while ( p != first_point );
    +
    +    if ( !measurement_taken )
    +      min_measurement = 0;
    +
    +    FT_TRACE4(( "af_latin_stretch_top_tilde: min measurement %ld\n",
    +                min_measurement ));
    +
    +    /* To preserve the stretched shape we prevent that the tilde */
    +    /* gets auto-hinted; we do this for all contours equal or    */
    +    /* above the vertical minimum of `tilde_contour`.            */
    +    af_touch_top_contours( hints, tilde_contour );
    +
    +    /* XXX This is an important element of the algorithm; */
    +    /*     we need a description.                         */
    +    target_height = min_measurement + 64;
    +    if ( height >= target_height )
    +      return 0;
    +
    +    /* Do the scaling. */
    +    p = first_point;
    +    do
    +    {
    +      p    = p->next;
    +      /* We adjust the height of the diacritic only, which means */
    +      /* we are never dealing with (valid) large numbers and can */
    +      /* thus avoid `FT_MulFix`.                                 */
    +      p->y = ADD_LONG( MUL_LONG( SUB_LONG( p->y,
    +                                           min_y ),
    +                                 target_height ) / height,
    +                       min_y );
    +
    +    } while ( p != first_point );
    +
    +    return target_height - height;
    +  }
    +
    +
    +  static FT_Pos
    +  af_latin_stretch_bottom_tilde( AF_GlyphHints  hints,
    +                                 FT_Int         tilde_contour )
    +  {
    +    AF_Point  p           = hints->contours[tilde_contour];
    +    AF_Point  first_point = p;
    +
    +    FT_Pos  min_y = hints->contour_y_minima[tilde_contour];
    +    FT_Pos  max_y = hints->contour_y_maxima[tilde_contour];
    +
    +    FT_Pos   min_measurement   = FT_LONG_MAX;
    +    FT_Bool  measurement_taken = FALSE;
    +
    +    FT_Pos  height;
    +    FT_Pos  extremum_threshold;
    +    FT_Pos  target_height;
    +
    +
    +    if ( min_y == max_y )
    +      return 0;
    +
    +    FT_TRACE4(( "af_latin_stretch_bottom_tilde: min y: %ld, max y: %ld\n",
    +                min_y, max_y ));
    +
    +    height             = SUB_LONG( max_y, min_y );
    +    extremum_threshold = height / 8;
    +
    +    do
    +    {
    +      p = p->next;
    +
    +      if ( !( p->flags & AF_FLAG_CONTROL )          &&
    +           p->prev->y == p->y && p->next->y == p->y &&
    +           p->y != min_y && p->y != max_y           &&
    +           p->prev->flags & AF_FLAG_CONTROL         &&
    +           p->next->flags & AF_FLAG_CONTROL         )
    +      {
    +        AF_Point  prev_on = p->prev;
    +        AF_Point  next_on = p->next;
    +
    +        FT_Pos  measurement;
    +
    +
    +        while ( prev_on->flags & AF_FLAG_CONTROL )
    +          prev_on = prev_on->prev;
    +        while ( next_on->flags & AF_FLAG_CONTROL )
    +          next_on = next_on->next;
    +
    +        if ( next_on->y > p->y && prev_on->y > p->y )
    +          measurement = SUB_LONG( p->y, min_y );
    +        else if ( next_on->y < p->y && prev_on->y < p->y )
    +          measurement = SUB_LONG( max_y, p->y );
    +        else
    +          continue;
    +
    +        if ( measurement < extremum_threshold )
    +          continue;
    +
    +        if ( !measurement_taken || measurement < min_measurement )
    +        {
    +          measurement_taken = TRUE;
    +          min_measurement   = measurement;
    +        }
    +      }
    +
    +    } while ( p != first_point );
    +
    +    if ( !measurement_taken )
    +      min_measurement = 0;
    +
    +    FT_TRACE4(( "af_latin_stretch_bottom_tilde: min measurement %ld\n",
    +                min_measurement ));
    +
    +    af_touch_bottom_contours( hints, tilde_contour );
    +
    +    target_height = min_measurement + 64;
    +    if ( height >= target_height )
    +      return 0;
    +
    +    p = first_point;
    +    do
    +    {
    +      p    = p->next;
    +      p->y = ADD_LONG( MUL_LONG( SUB_LONG( p->y,
    +                                           max_y ),
    +                                 target_height ) / height,
    +                       max_y );
    +
    +    } while ( p != first_point );
    +
    +    return target_height - height;
    +  }
    +
    +
    +  /*
    +    As part of `af_latin_stretch_top_tilde`, normally all points in the
    +    tilde are marked as touched, so the existing grid fitting will leave the
    +    tilde misaligned with the grid.
    +
    +    This function moves the tilde contour down to be grid-fitted.  We assume
    +    that if moving the tilde down would cause it to touch or overlap another
    +    contour, the vertical adjustment step will fix it.
    +
    +    Because the vertical adjustment step comes after all other grid-fitting
    +    steps, the top edge of the contour under the tilde is usually aligned
    +    with a horizontal grid line.  The vertical gap enforced by the vertical
    +    adjustment is exactly one pixel, so if the top edge of the contour below
    +    the tilde is on a grid line, the resulting tilde contour will also be
    +    grid-aligned.
    +
    +    But in cases where the gap is already big enough so that the vertical
    +    adjustment does nothing, this function ensures that even without the
    +    intervention of the vertical adjustment step, the tilde will be
    +    grid-aligned.
    +
    +    Return the vertical alignment amount.
    +  */
    +  static FT_Pos
    +  af_latin_align_top_tilde( AF_GlyphHints  hints,
    +                            FT_Int         tilde_contour )
    +  {
    +    AF_Point  p           = hints->contours[tilde_contour];
    +    AF_Point  first_point = p;
    +
    +    FT_Pos  min_y = p->y;
    +    FT_Pos  max_y = p->y;
    +
    +    FT_Pos  min_y_rounded;
    +    FT_Pos  delta;
    +    FT_Pos  height;
    +
    +
    +    /* Find vertical extrema of the (now stretched) tilde contour. */
    +    do
    +    {
    +      p = p->next;
    +      if ( p->y < min_y )
    +        min_y = p->y;
    +      if ( p->y > max_y )
    +        max_y = p->y;
    +
    +    } while ( p != first_point );
    +
    +    /* Align bottom of the tilde to the grid. */
    +    min_y_rounded = FT_PIX_ROUND_LONG( min_y );
    +    delta         = SUB_LONG( min_y_rounded, min_y );
    +    height        = SUB_LONG( max_y, min_y );
    +
    +    /* If the tilde is less than 3 pixels tall, snap the center of it */
    +    /* to the grid instead of the bottom to improve readability.      */
    +    if ( height < 64 * 3 )
    +      delta += ( FT_PIX_ROUND( height ) - height ) / 2;
    +
    +    af_move_contour_vertically( first_point, delta );
    +
    +    return delta;
    +  }
    +
    +
    +  static FT_Pos
    +  af_latin_align_bottom_tilde( AF_GlyphHints  hints,
    +                               FT_Int         tilde_contour )
    +  {
    +    AF_Point  p           = hints->contours[tilde_contour];
    +    AF_Point  first_point = p;
    +
    +    FT_Pos  min_y = p->y;
    +    FT_Pos  max_y = p->y;
    +
    +    FT_Pos  max_y_rounded;
    +    FT_Pos  delta;
    +    FT_Pos  height;
    +
    +
    +    do
    +    {
    +      p = p->next;
    +      if ( p->y < min_y )
    +        min_y = p->y;
    +      if ( p->y > max_y )
    +        max_y = p->y;
    +
    +    } while ( p != first_point );
    +
    +    max_y_rounded = FT_PIX_ROUND_LONG( max_y );
    +    delta         = SUB_LONG( max_y_rounded, max_y );
    +    height        = SUB_LONG( max_y, min_y );
    +
    +    if ( height < 64 * 3 )
    +      delta -= ( FT_PIX_ROUND( height ) - height ) / 2;
    +
    +    af_move_contour_vertically( first_point, delta );
    +
    +    return delta;
    +  }
    +
    +
    +  /* Return 1 if the given contour overlaps horizontally with the bounding */
    +  /* box of all other contours combined.  This is a helper for function    */
    +  /* `af_glyph_hints_apply_vertical_separation_adjustments`.               */
    +  static FT_Bool
    +  af_check_contour_horizontal_overlap( AF_GlyphHints  hints,
    +                                       FT_Int         contour_index )
    +  {
    +    FT_Pos  contour_max_x = FT_LONG_MIN;
    +    FT_Pos  contour_min_x = FT_LONG_MAX;
    +    FT_Pos  others_max_x  = FT_LONG_MIN;
    +    FT_Pos  others_min_x  = FT_LONG_MAX;
    +
    +    FT_Int  contour;
    +
    +    FT_Bool  horizontal_overlap;
    +
    +
    +    for ( contour = 0; contour < hints->num_contours; contour++ )
    +    {
    +      AF_Point  first_point = hints->contours[contour];
    +      AF_Point  p           = first_point;
    +
    +
    +      /* Ignore dimensionless contours (i.e., contours with only one or */
    +      /* two points).                                                   */
    +      if ( first_point->next->next == first_point )
    +        continue;
    +
    +      do
    +      {
    +        p = p->next;
    +
    +        if ( contour == contour_index )
    +        {
    +          if ( p->x < contour_min_x )
    +            contour_min_x = p->x;
    +          if ( p->x > contour_max_x )
    +            contour_max_x = p->x;
    +        }
    +        else
    +        {
    +          if ( p->x < others_min_x )
    +            others_min_x = p->x;
    +          if ( p->x > others_max_x )
    +            others_max_x = p->x;
    +        }
    +      } while ( p != first_point );
    +    }
    +
    +    horizontal_overlap =
    +      ( others_min_x <= contour_max_x && contour_max_x <= others_max_x ) ||
    +      ( others_min_x <= contour_min_x && contour_min_x <= others_max_x ) ||
    +      ( contour_max_x >= others_max_x && contour_min_x <= others_min_x );
    +
    +    return horizontal_overlap;
    +  }
    +
    +
    +  static void
    +  af_glyph_hints_apply_vertical_separation_adjustments(
    +    AF_GlyphHints  hints,
    +    AF_Dimension   dim,
    +    FT_UInt        glyph_index,
    +    FT_Pos         accent_height_limit,
    +    FT_Hash        reverse_charmap )
    +  {
    +    FT_Bool  adjust_top       = FALSE;
    +    FT_Bool  adjust_below_top = FALSE;
    +
    +    FT_Bool  adjust_bottom       = FALSE;
    +    FT_Bool  adjust_above_bottom = FALSE;
    +
    +    size_t*    val;
    +    FT_UInt32  adj_type = AF_ADJUST_NONE;
    +
    +
    +    FT_TRACE4(( "Entering"
    +                " af_glyph_hints_apply_vertical_separation_adjustments\n" ));
    +
    +    if ( dim != AF_DIMENSION_VERT )
    +      return;
    +
    +    val = ft_hash_num_lookup( (FT_Int)glyph_index, reverse_charmap );
    +    if ( val )
    +    {
    +      FT_UInt  codepoint = *val;
    +
    +
    +      adj_type = af_adjustment_database_lookup( codepoint );
    +
    +      if ( adj_type )
    +      {
    +        adjust_top       = !!( adj_type & AF_ADJUST_UP );
    +        adjust_below_top = !!( adj_type & AF_ADJUST_UP2 );
    +
    +        adjust_bottom       = !!( adj_type & AF_ADJUST_DOWN );
    +        adjust_above_bottom = !!( adj_type & AF_ADJUST_DOWN2 );
    +      }
    +    }
    +
    +    if ( ( ( adjust_top || adjust_bottom ) &&
    +           hints->num_contours >= 2        )             ||
    +         ( ( adjust_below_top || adjust_above_bottom ) &&
    +           hints->num_contours >= 3                    ) )
    +    {
    +      /* Recompute vertical extrema, this time acting on already */
    +      /* auto-hinted outlines.                                   */
    +      af_compute_vertical_extrema( hints );
    +    }
    +
    +    if ( ( adjust_top && hints->num_contours >= 2 )       ||
    +         ( adjust_below_top && hints->num_contours >= 3 ) )
    +    {
    +      FT_Int  high_contour;
    +      FT_Pos  high_min_y;
    +      FT_Pos  high_max_y;
    +      FT_Pos  high_height;
    +
    +      FT_Int  tilde_contour;
    +      FT_Pos  tilde_min_y;
    +      FT_Pos  tilde_max_y;
    +      FT_Pos  tilde_height;
    +
    +      FT_Int   contour;
    +      FT_Bool  horizontal_overlap;
    +
    +      FT_Pos  min_distance         = 64;
    +      FT_Pos  adjustment_amount;
    +      FT_Pos  calculated_amount;
    +      FT_Pos  centering_adjustment = 0;
    +      FT_Pos  pos;
    +
    +      FT_Bool  is_top_tilde       = !!( adj_type & AF_ADJUST_TILDE_TOP );
    +      FT_Bool  is_below_top_tilde = !!( adj_type & AF_ADJUST_TILDE_TOP2 );
    +
    +
    +      FT_TRACE4(( "af_glyph_hints_apply_vertical_separation_adjustments:\n"
    +                  "  Applying vertical adjustment: %s\n",
    +                  adjust_top ? "AF_ADJUST_TOP" : "AF_ADJUST_TOP2" ));
    +
    +      high_contour = adjust_below_top
    +                       ? af_find_second_highest_contour( hints )
    +                       : af_find_highest_contour( hints );
    +
    +      /* Check for a horizontal overlap between the high contour and the */
    +      /* rest.  If there is no overlap, do not adjust.                   */
    +      horizontal_overlap =
    +        af_check_contour_horizontal_overlap( hints, high_contour );
    +      if ( !horizontal_overlap )
    +      {
    +        FT_TRACE4(( "    High contour does not horizontally overlap"
    +                    " with other contours.\n"
    +                    "    Skipping adjustment.\n" ));
    +        return;
    +      }
    +
    +      high_min_y  = hints->contour_y_minima[high_contour];
    +      high_max_y  = hints->contour_y_maxima[high_contour];
    +      high_height = SUB_LONG( high_max_y, high_min_y );
    +
    +      if ( high_height > accent_height_limit )
    +      {
    +        FT_TRACE4(( "    High contour height (%.2f) exceeds accent height"
    +                    " limit (%.2f).\n"
    +                    "    Skipping adjustment.\n",
    +                    (double)high_height / 64,
    +                    (double)accent_height_limit / 64 ));
    +        return;
    +      }
    +
    +      /* If the difference between the vertical minimum of the high   */
    +      /* contour and the vertical maximum of another contour is less  */
    +      /* than a pixel, shift up the high contour to make the distance */
    +      /* one pixel.                                                   */
    +      for ( contour = 0; contour < hints->num_contours; contour++ )
    +      {
    +        FT_Pos  min_y;
    +        FT_Pos  max_y;
    +        FT_Pos  distance;
    +
    +
    +        if ( contour == high_contour )
    +          continue;
    +
    +        min_y = hints->contour_y_minima[contour];
    +        max_y = hints->contour_y_maxima[contour];
    +
    +        /* We also check that the y minimum of the 'other' contour */
    +        /* is below the high contour to avoid potential false hits */
    +        /* with contours enclosed in the high one.                 */
    +        distance = SUB_LONG( high_min_y, max_y );
    +        if ( distance < 64           &&
    +             distance < min_distance &&
    +             min_y < high_min_y      )
    +          min_distance = distance;
    +      }
    +
    +      adjustment_amount = 64 - min_distance;
    +
    +      if ( is_top_tilde || is_below_top_tilde )
    +      {
    +        tilde_contour = adjust_top
    +                          ? high_contour
    +                          : ( is_below_top_tilde
    +                                ? high_contour
    +                                : af_find_highest_contour( hints ) );
    +
    +        tilde_min_y  = hints->contour_y_minima[tilde_contour];
    +        tilde_max_y  = hints->contour_y_maxima[tilde_contour];
    +        tilde_height = SUB_LONG( tilde_max_y, tilde_min_y);
    +
    +        /* The vertical separation adjustment potentially undoes a */
    +        /* tilde center alignment.  If it would grid-align a tilde */
    +        /* less than 3 pixels in height, shift additionally to     */
    +        /* re-center the tilde.                                    */
    +
    +        pos = ADD_LONG( high_min_y, adjustment_amount );
    +        if ( adjust_below_top && is_top_tilde )
    +          pos += high_height;
    +
    +        if ( pos % 64 == 0 && tilde_height < 3 * 64 )
    +        {
    +          centering_adjustment = ( FT_PIX_ROUND( tilde_height ) -
    +                                   tilde_height ) / 2;
    +
    +          FT_TRACE4(( "    Additional tilde centering adjustment: %ld\n",
    +                      centering_adjustment ));
    +        }
    +      }
    +
    +      if ( ( adjust_top && is_top_tilde )             ||
    +           ( adjust_below_top && is_below_top_tilde ) )
    +        calculated_amount = adjustment_amount + centering_adjustment;
    +      else
    +        calculated_amount = adjustment_amount;
    +
    +      /* allow a delta of 2/64px to handle rounding differences */
    +      FT_TRACE4(( "    Calculated adjustment amount: %ld%s\n",
    +                  calculated_amount,
    +                  ( calculated_amount < -2                               ||
    +                    ( adjustment_amount > 66 && calculated_amount > 66 ) )
    +                      ? " (out of range [-2;66], not adjusting)" : "" ));
    +
    +      if ( calculated_amount != 0                                 &&
    +           calculated_amount >= -2                                &&
    +           ( calculated_amount <= 66 || adjustment_amount <= 66 ) )
    +      {
    +        /* Value 8 is heuristic. */
    +        FT_Pos  height_delta = high_height / 8;
    +        FT_Pos  min_y_limit  = SUB_LONG( high_min_y, height_delta );
    +
    +
    +        FT_TRACE4(( "    Pushing high contour %ld units up\n",
    +                    calculated_amount ));
    +
    +        /* While we use only a single contour (the 'high' one) for    */
    +        /* computing `adjustment_amount`, we apply it to all contours */
    +        /* that are (approximately) in the same vertical range or     */
    +        /* higher.  This covers, for example, the inner contour of    */
    +        /* the Czech ring accent or the second acute accent in the    */
    +        /* Hungarian double acute accent.                             */
    +        af_move_contours_up( hints, min_y_limit, adjustment_amount );
    +
    +        if ( adjust_below_top && is_top_tilde )
    +        {
    +          FT_TRACE4(( "    Pushing top tilde %ld units up\n",
    +                      centering_adjustment ));
    +
    +          af_move_contours_up( hints,
    +                               ADD_LONG( min_y_limit, high_height ),
    +                               centering_adjustment );
    +        }
    +      }
    +    }
    +
    +    if ( ( adjust_bottom && hints->num_contours >= 2 )       ||
    +         ( adjust_above_bottom && hints->num_contours >= 3 ) )
    +    {
    +      FT_Int  low_contour;
    +      FT_Pos  low_min_y;
    +      FT_Pos  low_max_y;
    +      FT_Pos  low_height;
    +
    +      FT_Int  tilde_contour;
    +      FT_Pos  tilde_min_y;
    +      FT_Pos  tilde_max_y;
    +      FT_Pos  tilde_height;
    +
    +      FT_Int   contour;
    +      FT_Bool  horizontal_overlap;
    +
    +      FT_Pos  min_distance         = 64;
    +      FT_Pos  adjustment_amount;
    +      FT_Pos  calculated_amount;
    +      FT_Pos  centering_adjustment = 0;
    +      FT_Pos  pos;
    +
    +      FT_Bool  is_bottom_tilde =
    +                 !!( adj_type & AF_ADJUST_TILDE_BOTTOM );
    +      FT_Bool  is_above_bottom_tilde =
    +                 !!( adj_type & AF_ADJUST_TILDE_BOTTOM2 );
    +
    +
    +      FT_TRACE4(( "af_glyph_hints_apply_vertical_separation_adjustments:\n"
    +                  "  Applying vertical adjustment: %s\n",
    +                  adjust_bottom ? "AF_ADJUST_DOWN": "AF_ADJUST_DOWN2" ));
    +
    +      low_contour = adjust_above_bottom
    +                      ? af_find_second_lowest_contour( hints )
    +                      : af_find_lowest_contour( hints );
    +
    +      horizontal_overlap =
    +        af_check_contour_horizontal_overlap( hints, low_contour );
    +      if ( !horizontal_overlap )
    +      {
    +        FT_TRACE4(( "    Low contour does not horizontally overlap"
    +                    " with other contours.\n"
    +                    "    Skipping adjustment.\n" ));
    +        return;
    +      }
    +
    +      low_min_y  = hints->contour_y_minima[low_contour];
    +      low_max_y  = hints->contour_y_maxima[low_contour];
    +      low_height = SUB_LONG( low_max_y, low_min_y );
    +
    +      if ( low_height > accent_height_limit )
    +      {
    +        FT_TRACE4(( "    Low contour height (%.2f) exceeds accent height"
    +                    " limit (%.2f).\n"
    +                    "    Skipping adjustment.\n",
    +                    (double)low_height / 64,
    +                    (double)accent_height_limit / 64 ));
    +        return;
    +      }
    +
    +      for ( contour = 0; contour < hints->num_contours; contour++ )
    +      {
    +        FT_Pos  min_y;
    +        FT_Pos  max_y;
    +        FT_Pos  distance;
    +
    +
    +        if ( contour == low_contour )
    +          continue;
    +
    +        min_y = hints->contour_y_minima[contour];
    +        max_y = hints->contour_y_maxima[contour];
    +
    +        distance = SUB_LONG( min_y, low_max_y );
    +        if ( distance < 64           &&
    +             distance < min_distance &&
    +             max_y > low_max_y       )
    +          min_distance = distance;
    +      }
    +
    +      adjustment_amount = 64 - min_distance;
    +
    +      if ( is_bottom_tilde || is_above_bottom_tilde )
    +      {
    +        tilde_contour = adjust_bottom
    +                          ? low_contour
    +                          : ( is_above_bottom_tilde
    +                                ? low_contour
    +                                : af_find_lowest_contour( hints ) );
    +
    +        tilde_min_y  = hints->contour_y_minima[tilde_contour];
    +        tilde_max_y  = hints->contour_y_maxima[tilde_contour];
    +        tilde_height = SUB_LONG( tilde_max_y, tilde_min_y );
    +
    +        pos = SUB_LONG( low_max_y, adjustment_amount );
    +        if ( adjust_above_bottom && is_bottom_tilde )
    +          pos -= low_height;
    +
    +        if ( pos % 64 == 0 && tilde_height < 3 * 64 )
    +        {
    +          centering_adjustment = ( FT_PIX_ROUND( tilde_height ) -
    +                                   tilde_height ) / 2;
    +
    +          FT_TRACE4(( "    Additional tilde centering adjustment: %ld\n",
    +                      centering_adjustment ));
    +        }
    +      }
    +
    +      if ( ( adjust_bottom && is_bottom_tilde )             ||
    +           ( adjust_above_bottom && is_above_bottom_tilde ) )
    +        calculated_amount = adjustment_amount + centering_adjustment;
    +      else
    +        calculated_amount = adjustment_amount;
    +
    +      FT_TRACE4(( "    Calculated adjustment amount: %ld%s\n",
    +                  calculated_amount,
    +                  ( calculated_amount < -2                               ||
    +                    ( adjustment_amount > 66 && calculated_amount > 66 ) )
    +                      ? " (out of range [-2;66], not adjusting)" : "" ));
    +
    +      if ( calculated_amount != 0                                 &&
    +           calculated_amount >= -2                                &&
    +           ( calculated_amount <= 66 || adjustment_amount <= 66 ) )
    +      {
    +        FT_Pos  height_delta = low_height / 8;
    +        FT_Pos  max_y_limit  = ADD_LONG( low_max_y, height_delta );
    +
    +
    +        FT_TRACE4(( "    Pushing low contour %ld units down\n",
    +                    calculated_amount ));
    +
    +        af_move_contours_down( hints, max_y_limit, adjustment_amount );
    +
    +        if ( adjust_above_bottom && is_bottom_tilde )
    +        {
    +          FT_TRACE4(( "    Pushing bottom tilde %ld units down\n",
    +                      centering_adjustment ));
    +
    +          af_move_contours_down( hints,
    +                                 SUB_LONG( max_y_limit, low_height ),
    +                                 centering_adjustment );
    +        }
    +      }
    +    }
    +
    +#ifdef FT_DEBUG_LEVEL_TRACE
    +    if ( !( ( ( adjust_top || adjust_bottom ) &&
    +              hints->num_contours >= 2        )             ||
    +            ( ( adjust_below_top || adjust_above_bottom ) &&
    +              hints->num_contours >= 3                    ) ) )
    +      FT_TRACE4(( "af_glyph_hints_apply_vertical_separation_adjustments:\n"
    +                  "  No vertical adjustment applied\n" ));
    +#endif
    +
    +    FT_TRACE4(( "Exiting"
    +                " af_glyph_hints_apply_vertical_separation_adjustments\n" ));
    +  }
    +
    +
    +#undef  FT_COMPONENT
    +#define FT_COMPONENT  aflatin
    +
    +
       /* Compute the snapped width of a given stem, ignoring very thin ones. */
       /* There is a lot of voodoo in this function; changing the hard-coded  */
       /* parameters influence the whole hinting process.                     */
    @@ -2998,13 +4220,15 @@
       af_latin_hint_edges( AF_GlyphHints  hints,
                            AF_Dimension   dim )
       {
    -    AF_AxisHints  axis       = &hints->axis[dim];
    -    AF_Edge       edges      = axis->edges;
    -    AF_Edge       edge_limit = FT_OFFSET( edges, axis->num_edges );
    -    FT_PtrDist    n_edges;
    -    AF_Edge       edge;
    -    AF_Edge       anchor     = NULL;
    -    FT_Int        has_serifs = 0;
    +    AF_AxisHints  axis = &hints->axis[dim];
    +
    +    AF_Edge     edges      = axis->edges;
    +    AF_Edge     edge_limit = FT_OFFSET( edges, axis->num_edges );
    +    AF_Edge     edge;
    +    FT_PtrDist  n_edges;
    +
    +    AF_Edge  anchor             = NULL;
    +    FT_Bool  has_non_stem_edges = 0;
     
         AF_StyleClass   style_class  = hints->metrics->style_class;
         AF_ScriptClass  script_class = af_script_classes[style_class->script];
    @@ -3131,7 +4355,7 @@
           edge2 = edge->link;
           if ( !edge2 )
           {
    -        has_serifs++;
    +        has_non_stem_edges = TRUE;
             continue;
           }
     
    @@ -3408,7 +4632,7 @@
           }
         }
     
    -    if ( has_serifs || !anchor )
    +    if ( has_non_stem_edges || !anchor )
         {
           /*
            * now hint the remaining edges (serifs and single) in order
    @@ -3426,9 +4650,75 @@
     
             if ( edge->serif )
             {
    +          AF_Edge  e, top, bottom;
    +          FT_Pos   min_pos, max_pos;
    +
    +
    +          /* Check whether we have a real serif -- if there are  */
    +          /* other edges with overlapping (or enclosed) segments */
    +          /* between the primary and serif edge, we have not.    */
    +          /*                                                     */
    +          /* Such a situation might happen if an accent is very  */
    +          /* near to its base glyph (for example, Vietnamese     */
    +          /* uppercase letters with two accents in `arial.ttf`), */
    +          /* and the segment detection algorithm classifies the  */
    +          /* top of the accent incorrectly as a serif.           */
               delta = edge->serif->opos - edge->opos;
               if ( delta < 0 )
    +          {
                 delta = -delta;
    +
    +            top    = edge;
    +            bottom = edge->serif;
    +          }
    +          else
    +          {
    +            top    = edge->serif;
    +            bottom = edge;
    +          }
    +
    +          if ( delta < 64 + 32 )
    +          {
    +            /* take care of outline orientation while computing extrema */
    +            min_pos = FT_MIN( FT_MIN( FT_MIN( top->first->first->v,
    +                                              top->first->last->v ),
    +                                      FT_MIN( top->last->first->v,
    +                                              top->last->last->v ) ),
    +                              FT_MIN( FT_MIN( bottom->first->first->v,
    +                                              bottom->first->last->v ),
    +                                      FT_MIN( bottom->last->first->v,
    +                                              bottom->last->last->v ) ) );
    +            max_pos = FT_MAX( FT_MAX( FT_MAX( top->first->first->v,
    +                                              top->first->last->v ),
    +                                      FT_MAX( top->last->first->v,
    +                                              top->last->last->v ) ),
    +                              FT_MAX( FT_MAX( bottom->first->first->v,
    +                                              bottom->first->last->v ),
    +                                      FT_MAX( bottom->last->first->v,
    +                                              bottom->last->last->v ) ) );
    +
    +            for ( e = bottom + 1; e < top; e++ )
    +            {
    +              FT_Pos  e_min = FT_MIN( FT_MIN( e->first->first->v,
    +                                              e->first->last->v ),
    +                                      FT_MIN( e->last->first->v,
    +                                              e->last->last->v ) );
    +              FT_Pos  e_max = FT_MAX( FT_MAX( e->first->first->v,
    +                                              e->first->last->v ),
    +                                      FT_MAX( e->last->first->v,
    +                                              e->last->last->v ) );
    +
    +              if ( !( ( e_min < min_pos && e_max < min_pos ) ||
    +                      ( e_min > max_pos && e_max > max_pos ) ) )
    +              {
    +                delta = 1000;  /* not a real serif */
    +                break;
    +              }
    +            }
    +
    +            if ( delta == 1000 )
    +              continue;
    +          }
             }
     
             if ( delta < 64 + 16 )
    @@ -3562,6 +4852,8 @@
     
         AF_LatinAxis  axis;
     
    +    FT_Pos  accent_height_limit = 0;
    +
     
         error = af_glyph_hints_reload( hints, outline );
         if ( error )
    @@ -3581,11 +4873,172 @@
     
         if ( AF_HINTS_DO_VERTICAL( hints ) )
         {
    +      size_t*  val;
    +
    +      FT_Int  top_tilde_contour    = 0;
    +      FT_Int  bottom_tilde_contour = 0;
    +
    +      FT_Int  below_top_tilde_contour    = 0;
    +      FT_Int  above_bottom_tilde_contour = 0;
    +
    +      AF_LatinBlue  capital_top_blue    = NULL;
    +      AF_LatinBlue  capital_bottom_blue = NULL;
    +
    +      AF_LatinBlue  small_top_blue    = NULL;
    +      AF_LatinBlue  small_bottom_blue = NULL;
    +
    +      FT_Bool  have_flags = FALSE;
    +
    +      FT_Bool  is_top_tilde    = FALSE;
    +      FT_Bool  is_bottom_tilde = FALSE;
    +
    +      FT_Bool  is_below_top_tilde    = FALSE;
    +      FT_Bool  is_above_bottom_tilde = FALSE;
    +
    +      FT_Bool  ignore_capital_top    = FALSE;
    +      FT_Bool  ignore_capital_bottom = FALSE;
    +
    +      FT_Bool  ignore_small_top    = FALSE;
    +      FT_Bool  ignore_small_bottom = FALSE;
    +
    +      FT_Bool  do_height_check = TRUE;
    +
    +      FT_Pos  limit;
    +      FT_Pos  y_offset;
    +
    +
    +      val = ft_hash_num_lookup( (FT_Int)glyph_index,
    +                                metrics->root.reverse_charmap );
    +      if ( val )
    +      {
    +        FT_UInt    codepoint = *val;
    +        FT_UInt32  adj_type  = af_adjustment_database_lookup( codepoint );
    +
    +
    +        if ( adj_type )
    +        {
    +          have_flags = !!adj_type;
    +
    +          is_top_tilde    = !!( adj_type & AF_ADJUST_TILDE_TOP );
    +          is_bottom_tilde = !!( adj_type & AF_ADJUST_TILDE_BOTTOM );
    +
    +          is_below_top_tilde    = !!( adj_type & AF_ADJUST_TILDE_TOP2 );
    +          is_above_bottom_tilde = !!( adj_type & AF_ADJUST_TILDE_BOTTOM2 );
    +
    +          ignore_capital_top    = !!( adj_type & AF_IGNORE_CAPITAL_TOP );
    +          ignore_capital_bottom = !!( adj_type & AF_IGNORE_CAPITAL_BOTTOM );
    +
    +          ignore_small_top    = !!( adj_type & AF_IGNORE_SMALL_TOP );
    +          ignore_small_bottom = !!( adj_type & AF_IGNORE_SMALL_BOTTOM );
    +
    +          do_height_check = !( adj_type & AF_ADJUST_NO_HEIGHT_CHECK );
    +        }
    +      }
    +
    +      if ( is_top_tilde || is_bottom_tilde             ||
    +           is_below_top_tilde || is_above_bottom_tilde )
    +        af_compute_vertical_extrema( hints );
    +
    +      /* Process inner tilde glyphs first. */
    +      if ( is_below_top_tilde )
    +      {
    +        below_top_tilde_contour = af_find_second_highest_contour( hints );
    +
    +        y_offset = af_latin_stretch_top_tilde(
    +                     hints, below_top_tilde_contour );
    +        y_offset += af_latin_align_top_tilde(
    +                      hints, below_top_tilde_contour );
    +
    +        limit = hints->contour_y_minima[below_top_tilde_contour];
    +        af_move_contours_up( hints, limit, y_offset );
    +      }
    +      if ( is_above_bottom_tilde )
    +      {
    +        above_bottom_tilde_contour = af_find_second_lowest_contour( hints );
    +
    +        y_offset = af_latin_stretch_bottom_tilde(
    +                     hints, above_bottom_tilde_contour );
    +        y_offset -= af_latin_align_bottom_tilde(
    +                      hints, above_bottom_tilde_contour );
    +
    +        limit = hints->contour_y_maxima[above_bottom_tilde_contour];
    +        af_move_contours_down( hints, limit, y_offset );
    +      }
    +
    +      if ( is_top_tilde )
    +      {
    +        top_tilde_contour = af_find_highest_contour( hints );
    +
    +        (void)af_latin_stretch_top_tilde( hints, top_tilde_contour );
    +        (void)af_latin_align_top_tilde( hints, top_tilde_contour );
    +      }
    +      if ( is_bottom_tilde )
    +      {
    +        bottom_tilde_contour = af_find_lowest_contour( hints );
    +
    +        (void)af_latin_stretch_bottom_tilde( hints, bottom_tilde_contour );
    +        (void)af_latin_align_bottom_tilde( hints, bottom_tilde_contour );
    +      }
    +
           axis  = &metrics->axis[AF_DIMENSION_VERT];
           error = af_latin_hints_detect_features( hints,
                                                   axis->width_count,
                                                   axis->widths,
                                                   AF_DIMENSION_VERT );
    +
    +      if ( have_flags )
    +      {
    +        af_latin_get_base_glyph_blues( hints,
    +                                       TRUE,
    +                                       &capital_top_blue,
    +                                       &capital_bottom_blue );
    +        af_latin_get_base_glyph_blues( hints,
    +                                       FALSE,
    +                                       &small_top_blue,
    +                                       &small_bottom_blue );
    +
    +        if ( do_height_check )
    +        {
    +          /* Set a heuristic limit for the accent height so that    */
    +          /* `af_glyph_hints_apply_vertical_separation_adjustments` */
    +          /* can correctly ignore the case where an accent is       */
    +          /* unexpectedly not the highest (or lowest) contour.      */
    +
    +          /* Either 2/3 of the lowercase blue zone height... */
    +          if ( small_top_blue && small_bottom_blue )
    +            accent_height_limit = 2 * ( small_top_blue->shoot.cur -
    +                                        small_bottom_blue->shoot.cur ) / 3;
    +          /* or 1/2 of the uppercase blue zone height... */
    +          else if ( capital_top_blue && capital_bottom_blue )
    +            accent_height_limit = ( capital_top_blue->shoot.cur -
    +                                    capital_bottom_blue->shoot.cur ) / 2;
    +          /* or half of the standard PostScript ascender value (8/10) */
    +          /* of the EM value, scaled.                                 */
    +          else
    +            accent_height_limit = FT_MulFix( metrics->units_per_em * 4 / 10,
    +                                             metrics->root.scaler.y_scale );
    +        }
    +      }
    +
    +      if ( capital_top_blue && capital_bottom_blue )
    +      {
    +        if ( ignore_capital_top )
    +          af_latin_ignore_top( hints,
    +                               capital_top_blue, capital_bottom_blue );
    +        if ( ignore_capital_bottom )
    +          af_latin_ignore_bottom( hints,
    +                                  capital_top_blue, capital_bottom_blue );
    +      }
    +      if ( small_top_blue && small_bottom_blue )
    +      {
    +        if ( ignore_small_top )
    +          af_latin_ignore_top( hints,
    +                               small_top_blue, small_bottom_blue );
    +        if ( ignore_small_bottom )
    +          af_latin_ignore_bottom( hints,
    +                                  small_top_blue, small_bottom_blue );
    +      }
    +
           if ( error )
             goto Exit;
     
    @@ -3604,6 +5057,12 @@
             af_glyph_hints_align_edge_points( hints, (AF_Dimension)dim );
             af_glyph_hints_align_strong_points( hints, (AF_Dimension)dim );
             af_glyph_hints_align_weak_points( hints, (AF_Dimension)dim );
    +        af_glyph_hints_apply_vertical_separation_adjustments(
    +          hints,
    +          (AF_Dimension)dim,
    +          glyph_index,
    +          accent_height_limit,
    +          metrics->root.reverse_charmap );
           }
         }
     
    @@ -3632,7 +5091,7 @@
     
         (AF_WritingSystem_InitMetricsFunc) af_latin_metrics_init,        /* style_metrics_init    */
         (AF_WritingSystem_ScaleMetricsFunc)af_latin_metrics_scale,       /* style_metrics_scale   */
    -    (AF_WritingSystem_DoneMetricsFunc) NULL,                         /* style_metrics_done    */
    +    (AF_WritingSystem_DoneMetricsFunc) af_latin_metrics_done,        /* style_metrics_done    */
         (AF_WritingSystem_GetStdWidthsFunc)af_latin_get_standard_widths, /* style_metrics_getstdw */
     
         (AF_WritingSystem_InitHintsFunc)   af_latin_hints_init,          /* style_hints_init      */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.h b/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.h
    index 54e50615021..82b4b0d480d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/aflatin.h
    @@ -5,7 +5,7 @@
      *   Auto-fitter hinting routines for latin writing system
      *   (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -61,17 +61,26 @@ FT_BEGIN_HEADER
               ( (b)->properties & AF_BLUE_PROPERTY_LATIN_X_HEIGHT )
     #define AF_LATIN_IS_LONG_BLUE( b ) \
               ( (b)->properties & AF_BLUE_PROPERTY_LATIN_LONG )
    +#define AF_LATIN_IS_CAPITAL_BOTTOM_BLUE( b ) \
    +          ( (b)->properties & AF_BLUE_PROPERTY_LATIN_CAPITAL_BOTTOM )
    +#define AF_LATIN_IS_SMALL_BOTTOM_BLUE( b ) \
    +          ( (b)->properties & AF_BLUE_PROPERTY_LATIN_SMALL_BOTTOM )
     
     #define AF_LATIN_MAX_WIDTHS  16
     
     
    -#define AF_LATIN_BLUE_ACTIVE      ( 1U << 0 ) /* zone height is <= 3/4px   */
    -#define AF_LATIN_BLUE_TOP         ( 1U << 1 ) /* we have a top blue zone   */
    -#define AF_LATIN_BLUE_SUB_TOP     ( 1U << 2 ) /* we have a subscript top   */
    -                                              /* blue zone                 */
    -#define AF_LATIN_BLUE_NEUTRAL     ( 1U << 3 ) /* we have neutral blue zone */
    -#define AF_LATIN_BLUE_ADJUSTMENT  ( 1U << 4 ) /* used for scale adjustment */
    -                                              /* optimization              */
    +#define AF_LATIN_BLUE_ACTIVE        ( 1U << 0 ) /* zone height is <= 3/4px */
    +#define AF_LATIN_BLUE_TOP           ( 1U << 1 ) /* we have a top blue zone */
    +#define AF_LATIN_BLUE_SUB_TOP       ( 1U << 2 ) /* we have a subscript     */
    +                                                /* top blue zone           */
    +#define AF_LATIN_BLUE_NEUTRAL       ( 1U << 3 ) /* we have a neutral blue  */
    +                                                /* zone                    */
    +#define AF_LATIN_BLUE_ADJUSTMENT    ( 1U << 4 ) /* used for scale adjustm. */
    +                                                /* optimization            */
    +#define AF_LATIN_BLUE_BOTTOM        ( 1U << 5 ) /* we have a capital       */
    +                                                /* letter bottom blue zone */
    +#define AF_LATIN_BLUE_BOTTOM_SMALL  ( 1U << 6 ) /* we have a small letter  */
    +                                                /* bottom blue zone        */
     
     
       typedef struct  AF_LatinBlueRec_
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afloader.c b/src/java.desktop/share/native/libfreetype/src/autofit/afloader.c
    index af1d59a6896..4e2ac1f1ce3 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afloader.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afloader.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter glyph loading routines (body).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -524,16 +524,18 @@
     
           bbox.xMin = FT_PIX_FLOOR( bbox.xMin );
           bbox.yMin = FT_PIX_FLOOR( bbox.yMin );
    -      bbox.xMax = FT_PIX_CEIL(  bbox.xMax );
    -      bbox.yMax = FT_PIX_CEIL(  bbox.yMax );
    +      bbox.xMax = FT_PIX_CEIL_LONG( bbox.xMax );
    +      bbox.yMax = FT_PIX_CEIL_LONG( bbox.yMax );
     
    -      slot->metrics.width        = bbox.xMax - bbox.xMin;
    -      slot->metrics.height       = bbox.yMax - bbox.yMin;
    +      slot->metrics.width        = SUB_LONG( bbox.xMax, bbox.xMin );
    +      slot->metrics.height       = SUB_LONG( bbox.yMax, bbox.yMin );
           slot->metrics.horiBearingX = bbox.xMin;
           slot->metrics.horiBearingY = bbox.yMax;
     
    -      slot->metrics.vertBearingX = FT_PIX_FLOOR( bbox.xMin + vvector.x );
    -      slot->metrics.vertBearingY = FT_PIX_FLOOR( bbox.yMax + vvector.y );
    +      slot->metrics.vertBearingX = FT_PIX_FLOOR( ADD_LONG( bbox.xMin,
    +                                                           vvector.x ) );
    +      slot->metrics.vertBearingY = FT_PIX_FLOOR( ADD_LONG( bbox.yMax,
    +                                                           vvector.y ) );
     
           /* for mono-width fonts (like Andale, Courier, etc.) we need */
           /* to keep the original rounded advance width; ditto for     */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afloader.h b/src/java.desktop/share/native/libfreetype/src/autofit/afloader.h
    index 99f0e15f92b..a04b4df0b3b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afloader.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afloader.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter glyph loading routines (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.c b/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.c
    index 726f6ca2b78..22d85a889e8 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter module implementation (body).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -146,7 +146,7 @@
     
           if ( !af_style_classes[ss] )
           {
    -        FT_TRACE2(( "af_property_set: Invalid value %d for property `%s'\n",
    +        FT_TRACE2(( "af_property_set: Invalid value %u for property `%s'\n",
                         *fallback_script, property_name ));
             return FT_THROW( Invalid_Argument );
           }
    @@ -412,6 +412,11 @@
         module->darken_params[6]  = CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4;
         module->darken_params[7]  = CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4;
     
    +#if defined( FT_CONFIG_OPTION_USE_HARFBUZZ )         && \
    +    defined( FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC )
    +    ft_hb_funcs_init( module );
    +#endif
    +
         return FT_Err_Ok;
       }
     
    @@ -421,6 +426,11 @@
       {
         FT_UNUSED( ft_module );
     
    +#if defined( FT_CONFIG_OPTION_USE_HARFBUZZ )         && \
    +    defined( FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC )
    +    ft_hb_funcs_done( (AF_Module)ft_module );
    +#endif
    +
     #ifdef FT_DEBUG_AUTOFIT
         if ( af_debug_hints_rec_->memory )
           af_glyph_hints_done( af_debug_hints_rec_ );
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.h b/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.h
    index 91a1abfef1f..c62421ef696 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afmodule.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter module implementation (specification).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -22,6 +22,7 @@
     #include 
     #include 
     
    +#include "ft-hb.h"
     
     FT_BEGIN_HEADER
     
    @@ -40,6 +41,11 @@ FT_BEGIN_HEADER
         FT_Bool       no_stem_darkening;
         FT_Int        darken_params[8];
     
    +#if defined( FT_CONFIG_OPTION_USE_HARFBUZZ )         && \
    +    defined( FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC )
    +    ft_hb_funcs_t*  hb_funcs;
    +#endif
    +
       } AF_ModuleRec, *AF_Module;
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afranges.c b/src/java.desktop/share/native/libfreetype/src/autofit/afranges.c
    index 007b4328189..fd54948f3a5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afranges.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afranges.c
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter Unicode script ranges (body).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -73,9 +73,11 @@
       {
         AF_UNIRANGE_REC(  0x0600,  0x06FF ),  /* Arabic                                 */
         AF_UNIRANGE_REC(  0x0750,  0x07FF ),  /* Arabic Supplement                      */
    +    AF_UNIRANGE_REC(  0x0870,  0x089F ),  /* Arabic Extended-B                      */
         AF_UNIRANGE_REC(  0x08A0,  0x08FF ),  /* Arabic Extended-A                      */
         AF_UNIRANGE_REC(  0xFB50,  0xFDFF ),  /* Arabic Presentation Forms-A            */
         AF_UNIRANGE_REC(  0xFE70,  0xFEFF ),  /* Arabic Presentation Forms-B            */
    +    AF_UNIRANGE_REC( 0x10EC0, 0x10EFF ),  /* Arabic Extended-C                      */
         AF_UNIRANGE_REC( 0x1EE00, 0x1EEFF ),  /* Arabic Mathematical Alphabetic Symbols */
         AF_UNIRANGE_REC(       0,       0 )
       };
    @@ -90,8 +92,9 @@
         AF_UNIRANGE_REC(  0x06DF,  0x06E4 ),
         AF_UNIRANGE_REC(  0x06E7,  0x06E8 ),
         AF_UNIRANGE_REC(  0x06EA,  0x06ED ),
    -    AF_UNIRANGE_REC(  0x08D4,  0x08E1 ),
    -    AF_UNIRANGE_REC(  0x08D3,  0x08FF ),
    +    AF_UNIRANGE_REC(  0x0897,  0x089F ),
    +    AF_UNIRANGE_REC(  0x08CA,  0x08E1 ),
    +    AF_UNIRANGE_REC(  0x08E3,  0x08FF ),
         AF_UNIRANGE_REC(  0xFBB2,  0xFBC1 ),
         AF_UNIRANGE_REC(  0xFE70,  0xFE70 ),
         AF_UNIRANGE_REC(  0xFE72,  0xFE72 ),
    @@ -101,6 +104,7 @@
         AF_UNIRANGE_REC(  0xFE7A,  0xFE7A ),
         AF_UNIRANGE_REC(  0xFE7C,  0xFE7C ),
         AF_UNIRANGE_REC(  0xFE7E,  0xFE7E ),
    +    AF_UNIRANGE_REC( 0x10EFD, 0x10EFF ),
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -198,8 +202,9 @@
     
       const AF_Script_UniRangeRec  af_cans_uniranges[] =
       {
    -    AF_UNIRANGE_REC(  0x1400,  0x167F ), /* Unified Canadian Aboriginal Syllabics          */
    -    AF_UNIRANGE_REC(  0x18B0,  0x18FF ), /* Unified Canadian Aboriginal Syllabics Extended */
    +    AF_UNIRANGE_REC(  0x1400,  0x167F ), /* Unified Canadian Aboriginal Syllabics            */
    +    AF_UNIRANGE_REC(  0x18B0,  0x18FF ), /* Unified Canadian Aboriginal Syllabics Extended   */
    +    AF_UNIRANGE_REC( 0x11AB0, 0x11ABF ), /* Unified Canadian Aboriginal Syllabics Extended-A */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -259,6 +264,9 @@
       };
     
     
    +  /* TODO: Split off data for new 'cyrb' (subscript) and 'cyrp'     */
    +  /*       (superscript) groups (mainly from the Extended-D block), */
    +  /*       in analogy to 'latb' and 'latp'?                         */
       const AF_Script_UniRangeRec  af_cyrl_uniranges[] =
       {
         AF_UNIRANGE_REC(  0x0400,  0x04FF ),  /* Cyrillic            */
    @@ -266,6 +274,7 @@
         AF_UNIRANGE_REC(  0x2DE0,  0x2DFF ),  /* Cyrillic Extended-A */
         AF_UNIRANGE_REC(  0xA640,  0xA69F ),  /* Cyrillic Extended-B */
         AF_UNIRANGE_REC(  0x1C80,  0x1C8F ),  /* Cyrillic Extended-C */
    +    AF_UNIRANGE_REC( 0x1E030, 0x1E08F ),  /* Cyrillic Extended-D */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -285,15 +294,16 @@
     
       const AF_Script_UniRangeRec  af_deva_uniranges[] =
       {
    -    AF_UNIRANGE_REC(  0x0900,  0x093B ),  /* Devanagari          */
    +    AF_UNIRANGE_REC(  0x0900,  0x093B ),  /* Devanagari            */
         /* omitting U+093C nukta */
    -    AF_UNIRANGE_REC(  0x093D,  0x0950 ),  /* ... continued       */
    +    AF_UNIRANGE_REC(  0x093D,  0x0950 ),  /* ... continued         */
         /* omitting U+0951 udatta, U+0952 anudatta */
    -    AF_UNIRANGE_REC(  0x0953,  0x0963 ),  /* ... continued       */
    +    AF_UNIRANGE_REC(  0x0953,  0x0963 ),  /* ... continued         */
         /* omitting U+0964 danda, U+0965 double danda */
    -    AF_UNIRANGE_REC(  0x0966,  0x097F ),  /* ... continued       */
    -    AF_UNIRANGE_REC(  0x20B9,  0x20B9 ),  /* (new) Rupee sign    */
    -    AF_UNIRANGE_REC(  0xA8E0,  0xA8FF ),  /* Devanagari Extended */
    +    AF_UNIRANGE_REC(  0x0966,  0x097F ),  /* ... continued         */
    +    AF_UNIRANGE_REC(  0x20B9,  0x20B9 ),  /* (new) Rupee sign      */
    +    AF_UNIRANGE_REC(  0xA8E0,  0xA8FF ),  /* Devanagari Extended   */
    +    AF_UNIRANGE_REC( 0x11B00, 0x11B5F ),  /* Devanagari Extended-A */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -329,6 +339,7 @@
         AF_UNIRANGE_REC(  0x1380,  0x139F ),  /* Ethiopic Supplement */
         AF_UNIRANGE_REC(  0x2D80,  0x2DDF ),  /* Ethiopic Extended   */
         AF_UNIRANGE_REC(  0xAB00,  0xAB2F ),  /* Ethiopic Extended-A */
    +    AF_UNIRANGE_REC( 0x1E7E0, 0x1E7FF ),  /* Ethiopic Extended-B */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -534,7 +545,7 @@
       {
         AF_UNIRANGE_REC(  0x0EB1,  0x0EB1 ),
         AF_UNIRANGE_REC(  0x0EB4,  0x0EBC ),
    -    AF_UNIRANGE_REC(  0x0EC8,  0x0ECD ),
    +    AF_UNIRANGE_REC(  0x0EC8,  0x0ECE ),
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -567,12 +578,15 @@
         AF_UNIRANGE_REC(  0x2C7E,  0x2C7F ),  /* ... continued                          */
         AF_UNIRANGE_REC(  0x2E00,  0x2E7F ),  /* Supplemental Punctuation               */
         AF_UNIRANGE_REC(  0xA720,  0xA76F ),  /* Latin Extended-D                       */
    -    AF_UNIRANGE_REC(  0xA771,  0xA7F7 ),  /* ... continued                          */
    +    AF_UNIRANGE_REC(  0xA771,  0xA7F0 ),  /* ... continued                          */
    +    AF_UNIRANGE_REC(  0xA7F2,  0xA7F7 ),  /* ... continued                          */
         AF_UNIRANGE_REC(  0xA7FA,  0xA7FF ),  /* ... continued                          */
         AF_UNIRANGE_REC(  0xAB30,  0xAB5B ),  /* Latin Extended-E                       */
    -    AF_UNIRANGE_REC(  0xAB60,  0xAB6F ),  /* ... continued                          */
    +    AF_UNIRANGE_REC(  0xAB60,  0xAB68 ),  /* ... continued                          */
    +    AF_UNIRANGE_REC(  0xAB6A,  0xAB6F ),  /* ... continued                          */
         AF_UNIRANGE_REC(  0xFB00,  0xFB06 ),  /* Alphab. Present. Forms (Latin Ligs)    */
         AF_UNIRANGE_REC( 0x1D400, 0x1D7FF ),  /* Mathematical Alphanumeric Symbols      */
    +    AF_UNIRANGE_REC( 0x1DF00, 0x1DFFF ),  /* Latin Extended-G                       */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -588,7 +602,7 @@
         AF_UNIRANGE_REC(  0x02B9,  0x02DF ),
         AF_UNIRANGE_REC(  0x02E5,  0x02FF ),
         AF_UNIRANGE_REC(  0x0300,  0x036F ),
    -    AF_UNIRANGE_REC(  0x1AB0,  0x1ABE ),
    +    AF_UNIRANGE_REC(  0x1AB0,  0x1AEB ),
         AF_UNIRANGE_REC(  0x1DC0,  0x1DFF ),
         AF_UNIRANGE_REC(  0x2017,  0x2017 ),
         AF_UNIRANGE_REC(  0x203E,  0x203E ),
    @@ -625,8 +639,11 @@
         AF_UNIRANGE_REC(  0x2070,  0x207F ),  /* superscript digits and letters      */
         AF_UNIRANGE_REC(  0x2C7D,  0x2C7D ),  /* modifier letter capital v           */
         AF_UNIRANGE_REC(  0xA770,  0xA770 ),  /* modifier letter us                  */
    +    AF_UNIRANGE_REC(  0xA7F1,  0xA7F1 ),  /* modifier letter capital s           */
         AF_UNIRANGE_REC(  0xA7F8,  0xA7F9 ),  /* more modifier letters               */
         AF_UNIRANGE_REC(  0xAB5C,  0xAB5F ),  /* more modifier letters               */
    +    AF_UNIRANGE_REC(  0xAB69,  0xAB69 ),  /* modifier letter small turned w      */
    +    AF_UNIRANGE_REC( 0x10780, 0x107FB ),  /* Latin Extended-F                    */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -638,7 +655,8 @@
     
       const AF_Script_UniRangeRec  af_lisu_uniranges[] =
       {
    -    AF_UNIRANGE_REC(  0xA4D0,  0xA4FF ),    /* Lisu */
    +    AF_UNIRANGE_REC(  0xA4D0,  0xA4FF ),    /* Lisu            */
    +    AF_UNIRANGE_REC( 0x11FB0, 0x11FBF ),    /* Lisu Supplement */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -696,6 +714,7 @@
         AF_UNIRANGE_REC(  0x1000,  0x109F ),    /* Myanmar            */
         AF_UNIRANGE_REC(  0xA9E0,  0xA9FF ),    /* Myanmar Extended-B */
         AF_UNIRANGE_REC(  0xAA60,  0xAA7F ),    /* Myanmar Extended-A */
    +    AF_UNIRANGE_REC( 0x116D0, 0x116FF ),    /* Myanmar Extended-C */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -836,6 +855,7 @@
     
       const AF_Script_UniRangeRec  af_sinh_nonbase_uniranges[] =
       {
    +    AF_UNIRANGE_REC(  0x0D81,  0x0D81 ),
         AF_UNIRANGE_REC(  0x0DCA,  0x0DCA ),
         AF_UNIRANGE_REC(  0x0DD2,  0x0DD6 ),
         AF_UNIRANGE_REC(       0,       0 )
    @@ -859,7 +879,8 @@
     
       const AF_Script_UniRangeRec  af_taml_uniranges[] =
       {
    -    AF_UNIRANGE_REC(  0x0B80,  0x0BFF ),  /* Tamil */
    +    AF_UNIRANGE_REC(  0x0B80,  0x0BFF ),  /* Tamil            */
    +    AF_UNIRANGE_REC( 0x11FC0, 0x11FFF ),  /* Tamil Supplement */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -899,6 +920,7 @@
       {
         AF_UNIRANGE_REC(  0x0C00,  0x0C00 ),
         AF_UNIRANGE_REC(  0x0C04,  0x0C04 ),
    +    AF_UNIRANGE_REC(  0x0C3C,  0x0C3C ),
         AF_UNIRANGE_REC(  0x0C3E,  0x0C40 ),
         AF_UNIRANGE_REC(  0x0C46,  0x0C56 ),
         AF_UNIRANGE_REC(  0x0C62,  0x0C63 ),
    @@ -992,6 +1014,7 @@
         AF_UNIRANGE_REC(  0xA806,  0xA806 ),
         AF_UNIRANGE_REC(  0xA80B,  0xA80B ),
         AF_UNIRANGE_REC(  0xA825,  0xA826 ),
    +    AF_UNIRANGE_REC(  0xA82C,  0xA82C ),
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    @@ -1048,15 +1071,21 @@
         AF_UNIRANGE_REC(  0xFE10,  0xFE1F ),  /* Vertical forms                          */
         AF_UNIRANGE_REC(  0xFE30,  0xFE4F ),  /* CJK Compatibility Forms                 */
         AF_UNIRANGE_REC(  0xFF00,  0xFFEF ),  /* Halfwidth and Fullwidth Forms           */
    +    AF_UNIRANGE_REC( 0x1AFF0, 0x1AFFF ),  /* Kana Extended-B                         */
         AF_UNIRANGE_REC( 0x1B000, 0x1B0FF ),  /* Kana Supplement                         */
         AF_UNIRANGE_REC( 0x1B100, 0x1B12F ),  /* Kana Extended-A                         */
    +    AF_UNIRANGE_REC( 0x1B130, 0x1B16F ),  /* Small Kana Extension                    */
         AF_UNIRANGE_REC( 0x1D300, 0x1D35F ),  /* Tai Xuan Hing Symbols                   */
         AF_UNIRANGE_REC( 0x20000, 0x2A6DF ),  /* CJK Unified Ideographs Extension B      */
         AF_UNIRANGE_REC( 0x2A700, 0x2B73F ),  /* CJK Unified Ideographs Extension C      */
         AF_UNIRANGE_REC( 0x2B740, 0x2B81F ),  /* CJK Unified Ideographs Extension D      */
         AF_UNIRANGE_REC( 0x2B820, 0x2CEAF ),  /* CJK Unified Ideographs Extension E      */
         AF_UNIRANGE_REC( 0x2CEB0, 0x2EBEF ),  /* CJK Unified Ideographs Extension F      */
    +    AF_UNIRANGE_REC( 0x2EBF0, 0x2EE5D ),  /* CJK Unified Ideographs Extension I      */
         AF_UNIRANGE_REC( 0x2F800, 0x2FA1F ),  /* CJK Compatibility Ideographs Supplement */
    +    AF_UNIRANGE_REC( 0x30000, 0x3134A ),  /* CJK Unified Ideographs Extension G      */
    +    AF_UNIRANGE_REC( 0x31350, 0x323AF ),  /* CJK Unified Ideographs Extension H      */
    +    AF_UNIRANGE_REC( 0x323B0, 0x33479 ),  /* CJK Unified Ideographs Extension J      */
         AF_UNIRANGE_REC(       0,       0 )
       };
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afranges.h b/src/java.desktop/share/native/libfreetype/src/autofit/afranges.h
    index 813b3ee78ef..fa00eb75046 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afranges.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afranges.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter Unicode script ranges (specification).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afscript.h b/src/java.desktop/share/native/libfreetype/src/autofit/afscript.h
    index 0a83d771501..5c4cbbcb922 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afscript.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afscript.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter scripts (specification only).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.c b/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.c
    index df0f46ada89..f3c0744fd9d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.c
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.c
    @@ -4,7 +4,7 @@
      *
      *   HarfBuzz interface for accessing OpenType features (body).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -22,8 +22,8 @@
     #include "aftypes.h"
     #include "afshaper.h"
     
    -#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
     
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
     
       /**************************************************************************
        *
    @@ -89,17 +89,18 @@
     #define SCRIPT( s, S, d, h, H, ss )  h,
     
     
    -  static const hb_script_t  scripts[] =
    +  FT_LOCAL_ARRAY_DEF( hb_script_t )
    +  af_hb_scripts[] =
       {
     #include "afscript.h"
       };
     
     
    -  FT_Error
    -  af_shaper_get_coverage( AF_FaceGlobals  globals,
    -                          AF_StyleClass   style_class,
    -                          FT_UShort*      gstyles,
    -                          FT_Bool         default_script )
    +  static FT_Error
    +  af_shaper_get_coverage_hb( AF_FaceGlobals  globals,
    +                             AF_StyleClass   style_class,
    +                             FT_UShort*      gstyles,
    +                             FT_Bool         default_script )
       {
         hb_face_t*  face;
     
    @@ -124,10 +125,10 @@
         if ( !globals || !style_class || !gstyles )
           return FT_THROW( Invalid_Argument );
     
    -    face = hb_font_get_face( globals->hb_font );
    +    face = hb( font_get_face )( globals->hb_font );
     
         coverage_tags = coverages[style_class->coverage];
    -    script        = scripts[style_class->script];
    +    script        = af_hb_scripts[style_class->script];
     
         /* Convert a HarfBuzz script tag into the corresponding OpenType */
         /* tag or tags -- some Indic scripts like Devanagari have an old */
    @@ -137,19 +138,19 @@
           hb_tag_t      tags[3];
     
     
    -      hb_ot_tags_from_script_and_language( script,
    -                                           HB_LANGUAGE_INVALID,
    -                                           &tags_count,
    -                                           tags,
    -                                           NULL,
    -                                           NULL );
    +      hb( ot_tags_from_script_and_language )( script,
    +                                              HB_LANGUAGE_INVALID,
    +                                              &tags_count,
    +                                              tags,
    +                                              NULL,
    +                                              NULL );
           script_tags[0] = tags_count > 0 ? tags[0] : HB_TAG_NONE;
           script_tags[1] = tags_count > 1 ? tags[1] : HB_TAG_NONE;
           script_tags[2] = tags_count > 2 ? tags[2] : HB_TAG_NONE;
         }
     
    -    /* If the second tag is HB_OT_TAG_DEFAULT_SCRIPT, change that to     */
    -    /* HB_TAG_NONE except for the default script.                        */
    +    /* If the second tag is HB_OT_TAG_DEFAULT_SCRIPT, change that to */
    +    /* HB_TAG_NONE except for the default script.                    */
         if ( default_script )
         {
           if ( script_tags[0] == HB_TAG_NONE )
    @@ -170,15 +171,15 @@
             goto Exit;
         }
     
    -    gsub_lookups = hb_set_create();
    -    hb_ot_layout_collect_lookups( face,
    -                                  HB_OT_TAG_GSUB,
    -                                  script_tags,
    -                                  NULL,
    -                                  coverage_tags,
    -                                  gsub_lookups );
    +    gsub_lookups = hb( set_create )();
    +    hb( ot_layout_collect_lookups )( face,
    +                                     HB_OT_TAG_GSUB,
    +                                     script_tags,
    +                                     NULL,
    +                                     coverage_tags,
    +                                     gsub_lookups );
     
    -    if ( hb_set_is_empty( gsub_lookups ) )
    +    if ( hb( set_is_empty )( gsub_lookups ) )
           goto Exit; /* nothing to do */
     
         FT_TRACE4(( "GSUB lookups (style `%s'):\n",
    @@ -189,22 +190,22 @@
         count = 0;
     #endif
     
    -    gsub_glyphs = hb_set_create();
    -    for ( idx = HB_SET_VALUE_INVALID; hb_set_next( gsub_lookups, &idx ); )
    +    gsub_glyphs = hb( set_create )();
    +    for ( idx = HB_SET_VALUE_INVALID; hb( set_next )( gsub_lookups, &idx ); )
         {
     #ifdef FT_DEBUG_LEVEL_TRACE
    -      FT_TRACE4(( " %d", idx ));
    +      FT_TRACE4(( " %u", idx ));
           count++;
     #endif
     
           /* get output coverage of GSUB feature */
    -      hb_ot_layout_lookup_collect_glyphs( face,
    -                                          HB_OT_TAG_GSUB,
    -                                          idx,
    -                                          NULL,
    -                                          NULL,
    -                                          NULL,
    -                                          gsub_glyphs );
    +      hb( ot_layout_lookup_collect_glyphs )( face,
    +                                             HB_OT_TAG_GSUB,
    +                                             idx,
    +                                             NULL,
    +                                             NULL,
    +                                             NULL,
    +                                             gsub_glyphs );
         }
     
     #ifdef FT_DEBUG_LEVEL_TRACE
    @@ -218,34 +219,34 @@
                     af_style_names[style_class->style] ));
         FT_TRACE4(( " " ));
     
    -    gpos_lookups = hb_set_create();
    -    hb_ot_layout_collect_lookups( face,
    -                                  HB_OT_TAG_GPOS,
    -                                  script_tags,
    -                                  NULL,
    -                                  coverage_tags,
    -                                  gpos_lookups );
    +    gpos_lookups = hb( set_create )();
    +    hb( ot_layout_collect_lookups )( face,
    +                                     HB_OT_TAG_GPOS,
    +                                     script_tags,
    +                                     NULL,
    +                                     coverage_tags,
    +                                     gpos_lookups );
     
     #ifdef FT_DEBUG_LEVEL_TRACE
         count = 0;
     #endif
     
    -    gpos_glyphs = hb_set_create();
    -    for ( idx = HB_SET_VALUE_INVALID; hb_set_next( gpos_lookups, &idx ); )
    +    gpos_glyphs = hb( set_create )();
    +    for ( idx = HB_SET_VALUE_INVALID; hb( set_next )( gpos_lookups, &idx ); )
         {
     #ifdef FT_DEBUG_LEVEL_TRACE
    -      FT_TRACE4(( " %d", idx ));
    +      FT_TRACE4(( " %u", idx ));
           count++;
     #endif
     
           /* get input coverage of GPOS feature */
    -      hb_ot_layout_lookup_collect_glyphs( face,
    -                                          HB_OT_TAG_GPOS,
    -                                          idx,
    -                                          NULL,
    -                                          gpos_glyphs,
    -                                          NULL,
    -                                          NULL );
    +      hb( ot_layout_lookup_collect_glyphs )( face,
    +                                             HB_OT_TAG_GPOS,
    +                                             idx,
    +                                             NULL,
    +                                             gpos_glyphs,
    +                                             NULL,
    +                                             NULL );
         }
     
     #ifdef FT_DEBUG_LEVEL_TRACE
    @@ -281,14 +282,14 @@
     
               GET_UTF8_CHAR( ch, p );
     
    -          for ( idx = HB_SET_VALUE_INVALID; hb_set_next( gsub_lookups,
    -                                                         &idx ); )
    +          for ( idx = HB_SET_VALUE_INVALID; hb( set_next )( gsub_lookups,
    +                                                            &idx ); )
               {
                 hb_codepoint_t  gidx = FT_Get_Char_Index( globals->face, ch );
     
     
    -            if ( hb_ot_layout_lookup_would_substitute( face, idx,
    -                                                       &gidx, 1, 1 ) )
    +            if ( hb( ot_layout_lookup_would_substitute )( face, idx,
    +                                                          &gidx, 1, 1 ) )
                 {
                   found = 1;
                   break;
    @@ -352,14 +353,14 @@
          *
          */
         if ( style_class->coverage != AF_COVERAGE_DEFAULT )
    -      hb_set_subtract( gsub_glyphs, gpos_glyphs );
    +      hb( set_subtract )( gsub_glyphs, gpos_glyphs );
     
     #ifdef FT_DEBUG_LEVEL_TRACE
         FT_TRACE4(( "  glyphs without GPOS data (`*' means already assigned)" ));
         count = 0;
     #endif
     
    -    for ( idx = HB_SET_VALUE_INVALID; hb_set_next( gsub_glyphs, &idx ); )
    +    for ( idx = HB_SET_VALUE_INVALID; hb( set_next )( gsub_glyphs, &idx ); )
         {
     #ifdef FT_DEBUG_LEVEL_TRACE
           if ( !( count % 10 ) )
    @@ -368,7 +369,7 @@
             FT_TRACE4(( "   " ));
           }
     
    -      FT_TRACE4(( " %d", idx ));
    +      FT_TRACE4(( " %u", idx ));
           count++;
     #endif
     
    @@ -397,10 +398,10 @@
     #endif
     
       Exit:
    -    hb_set_destroy( gsub_lookups );
    -    hb_set_destroy( gsub_glyphs  );
    -    hb_set_destroy( gpos_lookups );
    -    hb_set_destroy( gpos_glyphs  );
    +    hb( set_destroy )( gsub_lookups );
    +    hb( set_destroy )( gsub_glyphs  );
    +    hb( set_destroy )( gpos_lookups );
    +    hb( set_destroy )( gpos_glyphs  );
     
         return FT_Err_Ok;
       }
    @@ -437,31 +438,33 @@
       };
     
     
    -  void*
    -  af_shaper_buf_create( FT_Face  face )
    +  static void*
    +  af_shaper_buf_create_hb( AF_FaceGlobals  globals )
       {
    -    FT_UNUSED( face );
    +    FT_UNUSED( globals );
     
    -    return (void*)hb_buffer_create();
    +    return (void*)hb( buffer_create )();
       }
     
     
    -  void
    -  af_shaper_buf_destroy( FT_Face  face,
    -                         void*    buf )
    +  static void
    +  af_shaper_buf_destroy_hb( AF_FaceGlobals  globals,
    +                            void*           buf )
       {
    -    FT_UNUSED( face );
    +    FT_UNUSED( globals );
     
    -    hb_buffer_destroy( (hb_buffer_t*)buf );
    +    hb( buffer_destroy )( (hb_buffer_t*)buf );
       }
     
     
    -  const char*
    -  af_shaper_get_cluster( const char*      p,
    -                         AF_StyleMetrics  metrics,
    -                         void*            buf_,
    -                         unsigned int*    count )
    +  static const char*
    +  af_shaper_get_cluster_hb( const char*      p,
    +                            AF_StyleMetrics  metrics,
    +                            void*            buf_,
    +                            unsigned int*    count )
       {
    +    AF_FaceGlobals  globals = metrics->globals;
    +
         AF_StyleClass        style_class;
         const hb_feature_t*  feature;
         FT_Int               upem;
    @@ -472,6 +475,8 @@
         hb_font_t*      font;
         hb_codepoint_t  dummy;
     
    +    FT_UNUSED( globals );
    +
     
         upem        = (FT_Int)metrics->globals->face->units_per_EM;
         style_class = metrics->style_class;
    @@ -480,7 +485,7 @@
         font = metrics->globals->hb_font;
     
         /* we shape at a size of units per EM; this means font units */
    -    hb_font_set_scale( font, upem, upem );
    +    hb( font_set_scale )( font, upem, upem );
     
         while ( *p == ' ' )
           p++;
    @@ -492,15 +497,15 @@
         len = (int)( q - p );
     
         /* feed character(s) to the HarfBuzz buffer */
    -    hb_buffer_clear_contents( buf );
    -    hb_buffer_add_utf8( buf, p, len, 0, len );
    +    hb( buffer_clear_contents )( buf );
    +    hb( buffer_add_utf8 )( buf, p, len, 0, len );
     
         /* we let HarfBuzz guess the script and writing direction */
    -    hb_buffer_guess_segment_properties( buf );
    +    hb( buffer_guess_segment_properties )( buf );
     
         /* shape buffer, which means conversion from character codes to */
         /* glyph indices, possibly applying a feature                   */
    -    hb_shape( font, buf, feature, feature ? 1 : 0 );
    +    hb( shape )( font, buf, feature, feature ? 1 : 0 );
     
         if ( feature )
         {
    @@ -517,13 +522,13 @@
           /* glyph indices; otherwise the affected glyph or glyphs aren't     */
           /* available at all in the feature                                  */
     
    -      hb_buffer_clear_contents( hb_buf );
    -      hb_buffer_add_utf8( hb_buf, p, len, 0, len );
    -      hb_buffer_guess_segment_properties( hb_buf );
    -      hb_shape( font, hb_buf, NULL, 0 );
    +      hb( buffer_clear_contents )( hb_buf );
    +      hb( buffer_add_utf8 )( hb_buf, p, len, 0, len );
    +      hb( buffer_guess_segment_properties )( hb_buf );
    +      hb( shape )( font, hb_buf, NULL, 0 );
     
    -      ginfo    = hb_buffer_get_glyph_infos( buf, &gcount );
    -      hb_ginfo = hb_buffer_get_glyph_infos( hb_buf, &hb_gcount );
    +      ginfo    = hb( buffer_get_glyph_infos )( buf, &gcount );
    +      hb_ginfo = hb( buffer_get_glyph_infos )( hb_buf, &hb_gcount );
     
           if ( gcount == hb_gcount )
           {
    @@ -537,12 +542,12 @@
             if ( i == gcount )
             {
               /* both buffers have identical glyph indices */
    -          hb_buffer_clear_contents( buf );
    +          hb( buffer_clear_contents )( buf );
             }
           }
         }
     
    -    *count = hb_buffer_get_length( buf );
    +    *count = hb( buffer_get_length )( buf );
     
     #ifdef FT_DEBUG_LEVEL_TRACE
         if ( feature && *count > 1 )
    @@ -554,23 +559,25 @@
       }
     
     
    -  FT_ULong
    -  af_shaper_get_elem( AF_StyleMetrics  metrics,
    -                      void*            buf_,
    -                      unsigned int     idx,
    -                      FT_Long*         advance,
    -                      FT_Long*         y_offset )
    +  static FT_ULong
    +  af_shaper_get_elem_hb( AF_StyleMetrics  metrics,
    +                         void*            buf_,
    +                         unsigned int     idx,
    +                         FT_Long*         advance,
    +                         FT_Long*         y_offset )
       {
    +    AF_FaceGlobals  globals = metrics->globals;
    +
         hb_buffer_t*          buf = (hb_buffer_t*)buf_;
         hb_glyph_info_t*      ginfo;
         hb_glyph_position_t*  gpos;
         unsigned int          gcount;
     
    -    FT_UNUSED( metrics );
    +    FT_UNUSED( globals );
     
     
    -    ginfo = hb_buffer_get_glyph_infos( buf, &gcount );
    -    gpos  = hb_buffer_get_glyph_positions( buf, &gcount );
    +    ginfo = hb( buffer_get_glyph_infos )( buf, &gcount );
    +    gpos  = hb( buffer_get_glyph_positions )( buf, &gcount );
     
         if ( idx >= gcount )
           return 0;
    @@ -584,14 +591,14 @@
       }
     
     
    -#else /* !FT_CONFIG_OPTION_USE_HARFBUZZ */
    +#endif /* FT_CONFIG_OPTION_USE_HARFBUZZ */
     
     
    -  FT_Error
    -  af_shaper_get_coverage( AF_FaceGlobals  globals,
    -                          AF_StyleClass   style_class,
    -                          FT_UShort*      gstyles,
    -                          FT_Bool         default_script )
    +  static FT_Error
    +  af_shaper_get_coverage_nohb( AF_FaceGlobals  globals,
    +                               AF_StyleClass   style_class,
    +                               FT_UShort*      gstyles,
    +                               FT_Bool         default_script )
       {
         FT_UNUSED( globals );
         FT_UNUSED( style_class );
    @@ -602,29 +609,29 @@
       }
     
     
    -  void*
    -  af_shaper_buf_create( FT_Face  face )
    +  static void*
    +  af_shaper_buf_create_nohb( AF_FaceGlobals  globals )
       {
    -    FT_UNUSED( face );
    +    FT_UNUSED( globals );
     
         return NULL;
       }
     
     
    -  void
    -  af_shaper_buf_destroy( FT_Face  face,
    -                         void*    buf )
    +  static void
    +  af_shaper_buf_destroy_nohb( AF_FaceGlobals  globals,
    +                              void*    buf )
       {
    -    FT_UNUSED( face );
    +    FT_UNUSED( globals );
         FT_UNUSED( buf );
       }
     
     
    -  const char*
    -  af_shaper_get_cluster( const char*      p,
    -                         AF_StyleMetrics  metrics,
    -                         void*            buf_,
    -                         unsigned int*    count )
    +  static const char*
    +  af_shaper_get_cluster_nohb( const char*      p,
    +                              AF_StyleMetrics  metrics,
    +                              void*            buf_,
    +                              unsigned int*    count )
       {
         FT_Face    face      = metrics->globals->face;
         FT_ULong   ch, dummy = 0;
    @@ -656,12 +663,12 @@
       }
     
     
    -  FT_ULong
    -  af_shaper_get_elem( AF_StyleMetrics  metrics,
    -                      void*            buf_,
    -                      unsigned int     idx,
    -                      FT_Long*         advance,
    -                      FT_Long*         y_offset )
    +  static FT_ULong
    +  af_shaper_get_elem_nohb( AF_StyleMetrics  metrics,
    +                           void*            buf_,
    +                           unsigned int     idx,
    +                           FT_Long*         advance,
    +                           FT_Long*         y_offset )
       {
         FT_Face   face        = metrics->globals->face;
         FT_ULong  glyph_index = *(FT_ULong*)buf_;
    @@ -684,7 +691,90 @@
       }
     
     
    -#endif /* !FT_CONFIG_OPTION_USE_HARFBUZZ */
    +  /********************************************************************/
    +
    +  FT_Error
    +  af_shaper_get_coverage( AF_FaceGlobals  globals,
    +                          AF_StyleClass   style_class,
    +                          FT_UShort*      gstyles,
    +                          FT_Bool         default_script )
    +  {
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +    if ( ft_hb_enabled( globals ) )
    +      return af_shaper_get_coverage_hb( globals,
    +                                        style_class,
    +                                        gstyles,
    +                                        default_script );
    +    else
    +#endif
    +      return af_shaper_get_coverage_nohb( globals,
    +                                          style_class,
    +                                          gstyles,
    +                                          default_script );
    +  }
    +
    +
    +  void*
    +  af_shaper_buf_create( AF_FaceGlobals  globals )
    +  {
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +    if ( ft_hb_enabled( globals ) )
    +      return af_shaper_buf_create_hb( globals );
    +    else
    +#endif
    +      return af_shaper_buf_create_nohb( globals );
    +  }
    +
    +
    +  void
    +  af_shaper_buf_destroy( AF_FaceGlobals  globals,
    +                         void*           buf )
    +  {
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +    if ( ft_hb_enabled( globals ) )
    +      af_shaper_buf_destroy_hb( globals, buf );
    +    else
    +#endif
    +      af_shaper_buf_destroy_nohb( globals, buf );
    +  }
    +
    +
    +  const char*
    +  af_shaper_get_cluster( const char*      p,
    +                         AF_StyleMetrics  metrics,
    +                         void*            buf_,
    +                         unsigned int*    count )
    +  {
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +    if ( ft_hb_enabled( metrics->globals ) )
    +      return af_shaper_get_cluster_hb( p, metrics, buf_, count );
    +    else
    +#endif
    +      return af_shaper_get_cluster_nohb( p, metrics, buf_, count );
    +  }
    +
    +
    +  FT_ULong
    +  af_shaper_get_elem( AF_StyleMetrics  metrics,
    +                      void*            buf_,
    +                      unsigned int     idx,
    +                      FT_Long*         advance,
    +                      FT_Long*         y_offset )
    +  {
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +    if ( ft_hb_enabled( metrics->globals ) )
    +      return af_shaper_get_elem_hb( metrics,
    +                                    buf_,
    +                                    idx,
    +                                    advance,
    +                                    y_offset );
    +#endif
    +      return af_shaper_get_elem_nohb( metrics,
    +                                      buf_,
    +                                      idx,
    +                                      advance,
    +                                      y_offset );
    +  }
     
     
     /* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.h b/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.h
    index 2eb03bb5d98..757368fc9c0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afshaper.h
    @@ -4,7 +4,7 @@
      *
      *   HarfBuzz interface for accessing OpenType features (specification).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -23,17 +23,14 @@
     #include 
     
     
    +FT_BEGIN_HEADER
    +
     #ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    -
    -#include 
    -#include 
    -#include "ft-hb.h"
    -
    +  FT_LOCAL_ARRAY( hb_script_t )
    +  af_hb_scripts[];
     #endif
     
     
    -FT_BEGIN_HEADER
    -
       FT_Error
       af_shaper_get_coverage( AF_FaceGlobals  globals,
                               AF_StyleClass   style_class,
    @@ -42,11 +39,11 @@ FT_BEGIN_HEADER
     
     
       void*
    -  af_shaper_buf_create( FT_Face  face );
    +  af_shaper_buf_create( AF_FaceGlobals  globals );
     
       void
    -  af_shaper_buf_destroy( FT_Face  face,
    -                         void*    buf );
    +  af_shaper_buf_destroy( AF_FaceGlobals  globals,
    +                         void*           buf );
     
       const char*
       af_shaper_get_cluster( const char*      p,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afstyles.h b/src/java.desktop/share/native/libfreetype/src/autofit/afstyles.h
    index 7a33f37a856..206232efe25 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afstyles.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afstyles.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter styles (specification only).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -50,36 +50,36 @@
                      AF_COVERAGE_ ## C )
     
     #undef  META_STYLE_LATIN
    -#define META_STYLE_LATIN( s, S, ds )                     \
    -          STYLE_LATIN( s, S, c2cp, C2CP, ds,             \
    +#define META_STYLE_LATIN( s, S, ds )                    \
    +          STYLE_LATIN( s, S, c2cp, C2CP, ds,            \
                            "petite capitals from capitals", \
    -                       PETITE_CAPITALS_FROM_CAPITALS )   \
    -          STYLE_LATIN( s, S, c2sc, C2SC, ds,             \
    +                       PETITE_CAPITALS_FROM_CAPITALS )  \
    +          STYLE_LATIN( s, S, c2sc, C2SC, ds,            \
                            "small capitals from capitals",  \
    -                       SMALL_CAPITALS_FROM_CAPITALS )    \
    -          STYLE_LATIN( s, S, ordn, ORDN, ds,             \
    -                       "ordinals",                       \
    -                       ORDINALS )                        \
    -          STYLE_LATIN( s, S, pcap, PCAP, ds,             \
    -                       "petite capitals",                \
    -                       PETITE_CAPITALS )                 \
    -          STYLE_LATIN( s, S, sinf, SINF, ds,             \
    -                       "scientific inferiors",           \
    -                       SCIENTIFIC_INFERIORS )            \
    -          STYLE_LATIN( s, S, smcp, SMCP, ds,             \
    -                       "small capitals",                 \
    -                       SMALL_CAPITALS )                  \
    -          STYLE_LATIN( s, S, subs, SUBS, ds,             \
    -                       "subscript",                      \
    -                       SUBSCRIPT )                       \
    -          STYLE_LATIN( s, S, sups, SUPS, ds,             \
    -                       "superscript",                    \
    -                       SUPERSCRIPT )                     \
    -          STYLE_LATIN( s, S, titl, TITL, ds,             \
    -                       "titling",                        \
    -                       TITLING )                         \
    -          STYLE_LATIN( s, S, dflt, DFLT, ds,             \
    -                       "default",                        \
    +                       SMALL_CAPITALS_FROM_CAPITALS )   \
    +          STYLE_LATIN( s, S, ordn, ORDN, ds,            \
    +                       "ordinals",                      \
    +                       ORDINALS )                       \
    +          STYLE_LATIN( s, S, pcap, PCAP, ds,            \
    +                       "petite capitals",               \
    +                       PETITE_CAPITALS )                \
    +          STYLE_LATIN( s, S, sinf, SINF, ds,            \
    +                       "scientific inferiors",          \
    +                       SCIENTIFIC_INFERIORS )           \
    +          STYLE_LATIN( s, S, smcp, SMCP, ds,            \
    +                       "small capitals",                \
    +                       SMALL_CAPITALS )                 \
    +          STYLE_LATIN( s, S, subs, SUBS, ds,            \
    +                       "subscript",                     \
    +                       SUBSCRIPT )                      \
    +          STYLE_LATIN( s, S, sups, SUPS, ds,            \
    +                       "superscript",                   \
    +                       SUPERSCRIPT )                    \
    +          STYLE_LATIN( s, S, titl, TITL, ds,            \
    +                       "titling",                       \
    +                       TITLING )                        \
    +          STYLE_LATIN( s, S, dflt, DFLT, ds,            \
    +                       "default",                       \
                            DEFAULT )
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/aftypes.h b/src/java.desktop/share/native/libfreetype/src/autofit/aftypes.h
    index 27e4185e9f8..959640a12ec 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/aftypes.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/aftypes.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter types (specification only).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -35,6 +35,7 @@
     
     #include 
     #include 
    +#include 
     #include 
     #include 
     
    @@ -406,6 +407,7 @@ extern void*  af_debug_hints_;
     
       typedef struct AF_FaceGlobalsRec_*  AF_FaceGlobals;
     
    +
       /* This is the main structure that combines everything.  Autofit modules */
       /* specific to writing systems derive their structures from it, for      */
       /* example `AF_LatinMetrics'.                                            */
    @@ -418,6 +420,8 @@ extern void*  af_debug_hints_;
     
         AF_FaceGlobals  globals;    /* to access properties */
     
    +    FT_Hash  reverse_charmap;
    +
       } AF_StyleMetricsRec;
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afws-decl.h b/src/java.desktop/share/native/libfreetype/src/autofit/afws-decl.h
    index b78745af74e..12fa7a27a2b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afws-decl.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afws-decl.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter writing system declarations (specification only).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/afws-iter.h b/src/java.desktop/share/native/libfreetype/src/autofit/afws-iter.h
    index c86d609a352..1752697b375 100644
    --- a/src/java.desktop/share/native/libfreetype/src/autofit/afws-iter.h
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/afws-iter.h
    @@ -4,7 +4,7 @@
      *
      *   Auto-fitter writing systems iterator (specification only).
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/ft-hb.c b/src/java.desktop/share/native/libfreetype/src/autofit/ft-hb.c
    new file mode 100644
    index 00000000000..3c145d04640
    --- /dev/null
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/ft-hb.c
    @@ -0,0 +1,197 @@
    +/****************************************************************************
    + *
    + * ft-hb.c
    + *
    + *   FreeType-HarfBuzz bridge (body).
    + *
    + * Copyright (C) 2025 by
    + * Behdad Esfahbod.
    + *
    + * This file is part of the FreeType project, and may only be used,
    + * modified, and distributed under the terms of the FreeType project
    + * license, LICENSE.TXT.  By continuing to use, modify, or distribute
    + * this file you indicate that you have read the license and
    + * understand and accept it fully.
    + *
    + */
    +
    +
    +#if !defined( _WIN32 ) && !defined( _GNU_SOURCE )
    +#  define _GNU_SOURCE  1  /* for RTLD_DEFAULT */
    +#endif
    +
    +#include 
    +#include 
    +
    +#include "afglobal.h"
    +
    +#include "ft-hb.h"
    +
    +
    +#if defined( FT_CONFIG_OPTION_USE_HARFBUZZ )         && \
    +    defined( FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC )
    +
    +#ifndef FT_LIBHARFBUZZ
    +#  ifdef _WIN32
    +#    define FT_LIBHARFBUZZ "libharfbuzz-0.dll"
    +#  else
    +#    ifdef __APPLE__
    +#      define FT_LIBHARFBUZZ "libharfbuzz.0.dylib"
    +#    else
    +#      define FT_LIBHARFBUZZ "libharfbuzz.so.0"
    +#    endif
    +#  endif
    +#endif
    +
    +#ifdef _WIN32
    +
    +#  include 
    +
    +#else /* !_WIN32 */
    +
    +#  include 
    +
    +  /* The GCC pragma suppresses the warning "ISO C forbids     */
    +  /* assignment between function pointer and 'void *'", which */
    +  /* inevitably gets emitted with `-Wpedantic`; see the man   */
    +  /* page of function `dlsym` for more information.           */
    +#  if defined( __GNUC__ )
    +#    pragma GCC diagnostic push
    +#    ifndef __cplusplus
    +#      pragma GCC diagnostic ignored "-Wpedantic"
    +#    endif
    +#  endif
    +
    +#endif /* !_WIN32 */
    +
    +
    +  FT_LOCAL_DEF( void )
    +  ft_hb_funcs_init( struct AF_ModuleRec_  *af_module )
    +  {
    +    FT_Memory  memory = af_module->root.memory;
    +    FT_Error   error;
    +
    +    ft_hb_funcs_t                *funcs           = NULL;
    +    ft_hb_version_atleast_func_t  version_atleast = NULL;
    +
    +#ifdef _WIN32
    +    HANDLE  lib;
    +#  define DLSYM( lib, name ) \
    +            (ft_ ## name ## _func_t)GetProcAddress( lib, #name )
    +#else
    +    void  *lib;
    +#  define DLSYM( lib, name ) \
    +            (ft_ ## name ## _func_t)dlsym( lib, #name )
    +#endif
    +
    +
    +    af_module->hb_funcs = NULL;
    +
    +    if ( FT_NEW( funcs ) )
    +      return;
    +    FT_ZERO( funcs );
    +
    +#ifdef _WIN32
    +
    +    lib = LoadLibraryA( FT_LIBHARFBUZZ );
    +    if ( !lib )
    +      goto Fail;
    +    version_atleast = DLSYM( lib, hb_version_atleast );
    +
    +#else /* !_WIN32 */
    +
    +#  ifdef RTLD_DEFAULT
    +#    define FT_RTLD_FLAGS RTLD_LAZY | RTLD_GLOBAL
    +    lib             = RTLD_DEFAULT;
    +    version_atleast = DLSYM( lib, hb_version_atleast );
    +#  else
    +#    define FT_RTLD_FLAGS RTLD_LAZY
    +#  endif
    +
    +    if ( !version_atleast )
    +    {
    +      /* Load the HarfBuzz library.
    +       *
    +       * We never close the library, since we opened it with RTLD_GLOBAL.
    +       * This is important for the case where we are using HarfBuzz as a
    +       * shared library, and we want to use the symbols from the library in
    +       * other shared libraries or clients.  HarfBuzz holds onto global
    +       * variables, and closing the library will cause them to be
    +       * invalidated.
    +       */
    +      lib = dlopen( FT_LIBHARFBUZZ, FT_RTLD_FLAGS );
    +      if ( !lib )
    +        goto Fail;
    +      version_atleast = DLSYM( lib, hb_version_atleast );
    +    }
    +
    +#endif /* !_WIN32 */
    +
    +    if ( !version_atleast )
    +      goto Fail;
    +
    +    /* Load all symbols we use. */
    +#define HB_EXTERN( ret, name, args )  \
    +  {                                   \
    +    funcs->name = DLSYM( lib, name ); \
    +    if ( !funcs->name )               \
    +      goto Fail;                      \
    +  }
    +#include "ft-hb-decls.h"
    +#undef HB_EXTERN
    +
    +#undef DLSYM
    +
    +    af_module->hb_funcs = funcs;
    +    return;
    +
    +  Fail:
    +    if ( funcs )
    +      FT_FREE( funcs );
    +  }
    +
    +
    +  FT_LOCAL_DEF( void )
    +  ft_hb_funcs_done( struct AF_ModuleRec_  *af_module )
    +  {
    +    FT_Memory  memory = af_module->root.memory;
    +
    +
    +    if ( af_module->hb_funcs )
    +    {
    +      FT_FREE( af_module->hb_funcs );
    +      af_module->hb_funcs = NULL;
    +    }
    +  }
    +
    +
    +  FT_LOCAL_DEF( FT_Bool )
    +  ft_hb_enabled( struct AF_FaceGlobalsRec_  *globals )
    +  {
    +    return globals->module->hb_funcs != NULL;
    +  }
    +
    +#ifndef _WIN32
    +#  if defined( __GNUC__ )
    +#    pragma GCC diagnostic pop
    +#  endif
    +#endif
    +
    +#else /* !FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC */
    +
    +  FT_LOCAL_DEF( FT_Bool )
    +  ft_hb_enabled( struct AF_FaceGlobalsRec_  *globals )
    +  {
    +    FT_UNUSED( globals );
    +
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +    return TRUE;
    +#else
    +    return FALSE;
    +#endif
    +  }
    +
    +#endif /* !FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC */
    +
    +
    +/* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/autofit/ft-hb.h b/src/java.desktop/share/native/libfreetype/src/autofit/ft-hb.h
    new file mode 100644
    index 00000000000..95914deb8d3
    --- /dev/null
    +++ b/src/java.desktop/share/native/libfreetype/src/autofit/ft-hb.h
    @@ -0,0 +1,82 @@
    +/****************************************************************************
    + *
    + * ft-hb.h
    + *
    + *   FreeType-HarfBuzz bridge (specification).
    + *
    + * Copyright (C) 2025 by
    + * Behdad Esfahbod.
    + *
    + * This file is part of the FreeType project, and may only be used,
    + * modified, and distributed under the terms of the FreeType project
    + * license, LICENSE.TXT.  By continuing to use, modify, or distribute
    + * this file you indicate that you have read the license and
    + * understand and accept it fully.
    + *
    + */
    +
    +
    +#ifndef FT_HB_H
    +#define FT_HB_H
    +
    +#include 
    +#include 
    +
    +
    +FT_BEGIN_HEADER
    +
    +#ifdef FT_CONFIG_OPTION_USE_HARFBUZZ
    +
    +#  include "ft-hb-types.h"
    +
    +#  ifdef FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC
    +
    +#    define HB_EXTERN( ret, name, args ) \
    +              typedef ret (*ft_ ## name ## _func_t) args;
    +#    include "ft-hb-decls.h"
    +#    undef HB_EXTERN
    +
    +  typedef struct ft_hb_funcs_t
    +  {
    +#    define HB_EXTERN( ret, name, args ) \
    +              ft_ ## name ## _func_t  name;
    +#    include "ft-hb-decls.h"
    +#    undef HB_EXTERN
    +  } ft_hb_funcs_t;
    +
    +  struct  AF_ModuleRec_;
    +
    +  FT_LOCAL( void )
    +  ft_hb_funcs_init( struct AF_ModuleRec_  *af_module );
    +
    +  FT_LOCAL( void )
    +  ft_hb_funcs_done( struct AF_ModuleRec_  *af_module );
    +
    +#    define hb( x )  globals->module->hb_funcs->hb_ ## x
    +
    +#  else /* !FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC */
    +
    +#    define HB_EXTERN( ret, name, args ) \
    +              ret name args;
    +#    include "ft-hb-decls.h"
    +#    undef HB_EXTERN
    +
    +#    define hb( x )  hb_ ## x
    +
    +#  endif /* !FT_CONFIG_OPTION_USE_HARFBUZZ_DYNAMIC */
    +
    +#endif /* FT_CONFIG_OPTION_USE_HARFBUZZ */
    +
    +
    +  struct AF_FaceGlobalsRec_;
    +
    +  FT_LOCAL( FT_Bool )
    +  ft_hb_enabled( struct AF_FaceGlobalsRec_  *globals );
    +
    +
    +FT_END_HEADER
    +
    +#endif /* FT_HB_H */
    +
    +
    +/* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftadvanc.c b/src/java.desktop/share/native/libfreetype/src/base/ftadvanc.c
    index 717f7d08b35..7b965c62d58 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftadvanc.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftadvanc.c
    @@ -4,7 +4,7 @@
      *
      *   Quick computation of advance widths (body).
      *
    - * Copyright (C) 2008-2024 by
    + * Copyright (C) 2008-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -20,6 +20,7 @@
     
     #include 
     #include 
    +#include 
     
     
       static FT_Error
    @@ -47,11 +48,43 @@
         /* (see `FT_Load_Glyph' implementation in src/base/ftobjs.c)        */
     
         for ( nn = 0; nn < count; nn++ )
    -      advances[nn] = FT_MulDiv( advances[nn], scale, 64 );
    +      advances[nn] = FT_MulFix( 1024 * advances[nn], scale );
     
         return FT_Err_Ok;
       }
     
    +  /* loading (and hinting) to calculate the advances is slow  */
    +  /* unless TrueType hdmx table is provided as an accelerator */
    +  static FT_Error
    +  ft_load_advances( FT_Face   face,
    +                    FT_UInt   gindex,
    +                    FT_UInt   count,
    +                    FT_Int32  flags,
    +                    FT_Fixed  *padvances )
    +  {
    +    FT_UInt   nn;
    +    FT_Error  error   = FT_Err_Ok;
    +    FT_Pos    factor  = flags & FT_LOAD_NO_SCALE ? 1 : 1024;
    +    FT_Pos*   advance = flags & FT_LOAD_VERTICAL_LAYOUT
    +                          ? &face->glyph->advance.y
    +                          : &face->glyph->advance.x;
    +
    +
    +    flags |= (FT_UInt32)FT_LOAD_ADVANCE_ONLY;
    +
    +    for ( nn = 0; nn < count; nn++ )
    +    {
    +      error = FT_Load_Glyph( face, gindex + nn, flags );
    +      if ( error )
    +        break;
    +
    +      /* scale from 26.6 to 16.16, unless NO_SCALE was requested */
    +      padvances[nn] = *advance * factor;
    +    }
    +
    +    return error;
    +  }
    +
     
        /* at the moment, we can perform fast advance retrieval only in */
        /* the following cases:                                         */
    @@ -102,7 +135,10 @@
             return error;
         }
     
    -    return FT_Get_Advances( face, gindex, 1, flags, padvance );
    +    if ( flags & FT_ADVANCE_FLAG_FAST_ONLY )
    +      return FT_THROW( Unimplemented_Feature );
    +
    +    return ft_load_advances( face, gindex, 1, flags, padvance );
       }
     
     
    @@ -115,12 +151,9 @@
                        FT_Int32   flags,
                        FT_Fixed  *padvances )
       {
    -    FT_Error  error = FT_Err_Ok;
    -
         FT_Face_GetAdvancesFunc  func;
     
    -    FT_UInt  num, end, nn;
    -    FT_Int   factor;
    +    FT_UInt  num, end;
     
     
         if ( !face )
    @@ -140,6 +173,9 @@
         func = face->driver->clazz->get_advances;
         if ( func && LOAD_ADVANCE_FAST_CHECK( face, flags ) )
         {
    +      FT_Error  error;
    +
    +
           error = func( face, start, count, flags, padvances );
           if ( !error )
             return ft_face_scale_advances_( face, padvances, count, flags );
    @@ -148,26 +184,10 @@
             return error;
         }
     
    -    error = FT_Err_Ok;
    -
         if ( flags & FT_ADVANCE_FLAG_FAST_ONLY )
           return FT_THROW( Unimplemented_Feature );
     
    -    flags |= (FT_UInt32)FT_LOAD_ADVANCE_ONLY;
    -    factor = ( flags & FT_LOAD_NO_SCALE ) ? 1 : 1024;
    -    for ( nn = 0; nn < count; nn++ )
    -    {
    -      error = FT_Load_Glyph( face, start + nn, flags );
    -      if ( error )
    -        break;
    -
    -      /* scale from 26.6 to 16.16, unless NO_SCALE was requested */
    -      padvances[nn] = ( flags & FT_LOAD_VERTICAL_LAYOUT )
    -                      ? face->glyph->advance.y * factor
    -                      : face->glyph->advance.x * factor;
    -    }
    -
    -    return error;
    +    return ft_load_advances( face, start, count, flags, padvances );
       }
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftbase.h b/src/java.desktop/share/native/libfreetype/src/base/ftbase.h
    index 1d98b26dd51..66f091165fe 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftbase.h
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftbase.h
    @@ -4,7 +4,7 @@
      *
      *   Private functions used in the `base' module (specification).
      *
    - * Copyright (C) 2008-2024 by
    + * Copyright (C) 2008-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, and suzuki toshiya.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -34,7 +34,7 @@ FT_BEGIN_HEADER
     #ifdef FT_CONFIG_OPTION_MAC_FONTS
     
       /* MacOS resource fork cannot exceed 16MB at least for Carbon code; */
    -  /* see https://support.microsoft.com/en-us/kb/130437                */
    +  /* see https://jeffpar.github.io/kbarchive/kb/130/Q130437/          */
     #define FT_MAC_RFORK_MAX_LEN  0x00FFFFFFUL
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftbbox.c b/src/java.desktop/share/native/libfreetype/src/base/ftbbox.c
    index d6aa5d56df8..feccdee5dd7 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftbbox.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftbbox.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType bbox computation (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftbitmap.c b/src/java.desktop/share/native/libfreetype/src/base/ftbitmap.c
    index 4be145679fd..364f881e435 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftbitmap.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftbitmap.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType utility functions for bitmaps (body).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -876,13 +876,13 @@
     
     #ifdef FT_DEBUG_LEVEL_TRACE
         FT_TRACE5(( "FT_Bitmap_Blend:\n" ));
    -    FT_TRACE5(( "  source bitmap: (%ld, %ld) -- (%ld, %ld); %d x %d\n",
    +    FT_TRACE5(( "  source bitmap: (%ld, %ld) -- (%ld, %ld); %u x %u\n",
           source_llx / 64, source_lly / 64,
           source_urx / 64, source_ury / 64,
           source_->width, source_->rows ));
     
         if ( target->width && target->rows )
    -      FT_TRACE5(( "  target bitmap: (%ld, %ld) -- (%ld, %ld); %d x %d\n",
    +      FT_TRACE5(( "  target bitmap: (%ld, %ld) -- (%ld, %ld); %u x %u\n",
             target_llx / 64, target_lly / 64,
             target_urx / 64, target_ury / 64,
             target->width, target->rows ));
    @@ -890,7 +890,7 @@
           FT_TRACE5(( "  target bitmap: empty\n" ));
     
         if ( final_width && final_rows )
    -      FT_TRACE5(( "  final bitmap: (%ld, %ld) -- (%ld, %ld); %d x %d\n",
    +      FT_TRACE5(( "  final bitmap: (%ld, %ld) -- (%ld, %ld); %u x %u\n",
             final_llx / 64, final_lly / 64,
             final_urx / 64, final_ury / 64,
             final_width, final_rows ));
    @@ -922,14 +922,7 @@
           target->pitch      = (int)final_width * 4;
           target->num_grays  = 256;
     
    -      if ( FT_LONG_MAX / target->pitch < (int)target->rows )
    -      {
    -        FT_TRACE5(( "FT_Blend_Bitmap: target bitmap too large (%d x %d)\n",
    -                     final_width, final_rows ));
    -        return FT_THROW( Invalid_Argument );
    -      }
    -
    -      if ( FT_ALLOC( target->buffer, target->pitch * (int)target->rows ) )
    +      if ( FT_ALLOC_MULT( target->buffer, target->rows, target->pitch ) )
             return error;
     
           free_target_bitmap_on_error = 1;
    @@ -950,16 +943,9 @@
     
           new_pitch = (int)final_width * 4;
     
    -      if ( FT_LONG_MAX / new_pitch < (int)final_rows )
    -      {
    -        FT_TRACE5(( "FT_Blend_Bitmap: target bitmap too large (%d x %d)\n",
    -                     final_width, final_rows ));
    -        return FT_THROW( Invalid_Argument );
    -      }
    -
           /* TODO: provide an in-buffer solution for large bitmaps */
           /*       to avoid allocation of a new buffer             */
    -      if ( FT_ALLOC( buffer, new_pitch * (int)final_rows ) )
    +      if ( FT_ALLOC_MULT( buffer, final_rows, new_pitch ) )
             goto Error;
     
           /* copy data to new buffer */
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftcalc.c b/src/java.desktop/share/native/libfreetype/src/base/ftcalc.c
    index 92de09ed877..7d6e12e2543 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftcalc.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftcalc.c
    @@ -4,7 +4,7 @@
      *
      *   Arithmetic computations (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -38,24 +38,11 @@
     #include 
     #include 
     
    -
    -#ifdef FT_MULFIX_ASSEMBLER
    -#undef FT_MulFix
    +  /* cancel inlining macro from internal/ftcalc.h */
    +#ifdef FT_MulFix
    +#  undef FT_MulFix
     #endif
     
    -/* we need to emulate a 64-bit data type if a real one isn't available */
    -
    -#ifndef FT_INT64
    -
    -  typedef struct  FT_Int64_
    -  {
    -    FT_UInt32  lo;
    -    FT_UInt32  hi;
    -
    -  } FT_Int64;
    -
    -#endif /* !FT_INT64 */
    -
     
       /**************************************************************************
        *
    @@ -88,7 +75,7 @@
       FT_EXPORT_DEF( FT_Fixed )
       FT_RoundFix( FT_Fixed  a )
       {
    -    return ( ADD_LONG( a, 0x8000L - ( a < 0 ) ) ) & ~0xFFFFL;
    +    return ADD_LONG( a, 0x8000L - ( a < 0 ) ) & ~0xFFFFL;
       }
     
     
    @@ -97,7 +84,7 @@
       FT_EXPORT_DEF( FT_Fixed )
       FT_CeilFix( FT_Fixed  a )
       {
    -    return ( ADD_LONG( a, 0xFFFFL ) ) & ~0xFFFFL;
    +    return ADD_LONG( a, 0xFFFFL ) & ~0xFFFFL;
       }
     
     
    @@ -225,18 +212,18 @@
       FT_MulFix( FT_Long  a_,
                  FT_Long  b_ )
       {
    -#ifdef FT_MULFIX_ASSEMBLER
    +#ifdef FT_CONFIG_OPTION_INLINE_MULFIX
     
    -    return FT_MULFIX_ASSEMBLER( (FT_Int32)a_, (FT_Int32)b_ );
    +    return FT_MulFix_64( a_, b_ );
     
     #else
     
    -    FT_Int64  ab = (FT_Int64)a_ * (FT_Int64)b_;
    +    FT_Int64  ab = MUL_INT64( a_, b_ );
     
         /* this requires arithmetic right shift of signed numbers */
    -    return (FT_Long)( ( ab + 0x8000L - ( ab < 0 ) ) >> 16 );
    +    return (FT_Long)( ( ab + 0x8000L + ( ab >> 63 ) ) >> 16 );
     
    -#endif /* FT_MULFIX_ASSEMBLER */
    +#endif /* FT_CONFIG_OPTION_INLINE_MULFIX */
       }
     
     
    @@ -975,43 +962,36 @@
     
     #else
     
    -    FT_Int  result;
    +    FT_Int64  z1, z2;
    +    FT_Int    result;
     
     
    -    if ( ADD_LONG( FT_ABS( in_x ), FT_ABS( out_y ) ) <= 131071L &&
    -         ADD_LONG( FT_ABS( in_y ), FT_ABS( out_x ) ) <= 131071L )
    +    if ( (FT_ULong)FT_ABS( in_x ) + (FT_ULong)FT_ABS( out_y ) <= 92681UL )
         {
    -      FT_Long  z1 = MUL_LONG( in_x, out_y );
    -      FT_Long  z2 = MUL_LONG( in_y, out_x );
    -
    -
    -      if ( z1 > z2 )
    -        result = +1;
    -      else if ( z1 < z2 )
    -        result = -1;
    -      else
    -        result = 0;
    +      z1.lo = (FT_UInt32)in_x * (FT_UInt32)out_y;
    +      z1.hi = (FT_UInt32)( (FT_Int32)z1.lo >> 31 );  /* sign-expansion */
         }
    -    else /* products might overflow 32 bits */
    -    {
    -      FT_Int64  z1, z2;
    -
    -
    -      /* XXX: this function does not allow 64-bit arguments */
    +    else
           ft_multo64( (FT_UInt32)in_x, (FT_UInt32)out_y, &z1 );
    +
    +    if ( (FT_ULong)FT_ABS( in_y ) + (FT_ULong)FT_ABS( out_x ) <= 92681UL )
    +    {
    +      z2.lo = (FT_UInt32)in_y * (FT_UInt32)out_x;
    +      z2.hi = (FT_UInt32)( (FT_Int32)z2.lo >> 31 );  /* sign-expansion */
    +    }
    +    else
           ft_multo64( (FT_UInt32)in_y, (FT_UInt32)out_x, &z2 );
     
    -      if ( z1.hi > z2.hi )
    -        result = +1;
    -      else if ( z1.hi < z2.hi )
    -        result = -1;
    -      else if ( z1.lo > z2.lo )
    -        result = +1;
    -      else if ( z1.lo < z2.lo )
    -        result = -1;
    -      else
    -        result = 0;
    -    }
    +    if      ( (FT_Int32)z1.hi > (FT_Int32)z2.hi )
    +      result = +1;
    +    else if ( (FT_Int32)z1.hi < (FT_Int32)z2.hi )
    +      result = -1;
    +    else if ( z1.lo > z2.lo )
    +      result = +1;
    +    else if ( z1.lo < z2.lo )
    +      result = -1;
    +    else
    +      result =  0;
     
         /* XXX: only the sign of return value, +1/0/-1 must be used */
         return result;
    @@ -1065,62 +1045,4 @@
       }
     
     
    -  FT_BASE_DEF( FT_Int32 )
    -  FT_MulAddFix( FT_Fixed*  s,
    -                FT_Int32*  f,
    -                FT_UInt    count )
    -  {
    -    FT_UInt   i;
    -    FT_Int64  temp;
    -
    -
    -#ifdef FT_INT64
    -    temp = 0;
    -
    -    for ( i = 0; i < count; ++i )
    -      temp += (FT_Int64)s[i] * f[i];
    -
    -    return (FT_Int32)( ( temp + 0x8000 ) >> 16 );
    -#else
    -    temp.hi = 0;
    -    temp.lo = 0;
    -
    -    for ( i = 0; i < count; ++i )
    -    {
    -      FT_Int64  multResult;
    -
    -      FT_Int     sign  = 1;
    -      FT_UInt32  carry = 0;
    -
    -      FT_UInt32  scalar;
    -      FT_UInt32  factor;
    -
    -
    -      FT_MOVE_SIGN( FT_UInt32, s[i], scalar, sign );
    -      FT_MOVE_SIGN( FT_UInt32, f[i], factor, sign );
    -
    -      ft_multo64( scalar, factor, &multResult );
    -
    -      if ( sign < 0 )
    -      {
    -        /* Emulated `FT_Int64` negation. */
    -        carry = ( multResult.lo == 0 );
    -
    -        multResult.lo = ~multResult.lo + 1;
    -        multResult.hi = ~multResult.hi + carry;
    -      }
    -
    -      FT_Add64( &temp, &multResult, &temp );
    -    }
    -
    -    /* Shift and round value. */
    -    return (FT_Int32)( ( ( temp.hi << 16 ) | ( temp.lo >> 16 ) )
    -                                     + ( 1 & ( temp.lo >> 15 ) ) );
    -
    -
    -#endif /* !FT_INT64 */
    -
    -  }
    -
    -
     /* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftcid.c b/src/java.desktop/share/native/libfreetype/src/base/ftcid.c
    index 4f2deb19a05..35cd0fcd2be 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftcid.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftcid.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for accessing CID font information.
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * Derek Clegg and Michael Toftdal.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftcolor.c b/src/java.desktop/share/native/libfreetype/src/base/ftcolor.c
    index c6bf2a3cd1a..90b02b7d2de 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftcolor.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftcolor.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType's glyph color management (body).
      *
    - * Copyright (C) 2018-2024 by
    + * Copyright (C) 2018-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -56,9 +56,7 @@
                          FT_Color*  *apalette )
       {
         FT_Error  error;
    -
    -    TT_Face       ttface;
    -    SFNT_Service  sfnt;
    +    TT_Face   ttface = (TT_Face)face;
     
     
         if ( !face )
    @@ -72,14 +70,17 @@
           return FT_Err_Ok;
         }
     
    -    ttface = (TT_Face)face;
    -    sfnt   = (SFNT_Service)ttface->sfnt;
    +    if ( palette_index != ttface->palette_index )
    +    {
    +      SFNT_Service  sfnt = (SFNT_Service)ttface->sfnt;
     
    -    error = sfnt->set_palette( ttface, palette_index );
    -    if ( error )
    -      return error;
     
    -    ttface->palette_index = palette_index;
    +      error = sfnt->set_palette( ttface, palette_index );
    +      if ( error )
    +        return error;
    +
    +      ttface->palette_index = palette_index;
    +    }
     
         if ( apalette )
           *apalette = ttface->palette;
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftdbgmem.c b/src/java.desktop/share/native/libfreetype/src/base/ftdbgmem.c
    index 902a5dc8bbc..7f54e759b16 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftdbgmem.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftdbgmem.c
    @@ -4,7 +4,7 @@
      *
      *   Memory debugger (body).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -139,7 +139,6 @@
       } FT_MemTableRec;
     
     
    -#define FT_MEM_SIZE_MIN  7
     #define FT_MEM_SIZE_MAX  13845163
     
     #define FT_FILENAME( x )  ( (x) ? (x) : "unknown file" )
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftdebug.c b/src/java.desktop/share/native/libfreetype/src/base/ftdebug.c
    index 11307eaace4..c615f29e521 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftdebug.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftdebug.c
    @@ -4,7 +4,7 @@
      *
      *   Debugging and logging component (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -64,7 +64,7 @@
        *    with the actual log message if set to true.
        *
        * 5. The flag `ft_timestamp_flag` prints time along with the actual log
    -   *    message if set to ture.
    +   *    message if set to true.
        *
        * 6. `ft_have_newline_char` is used to differentiate between a log
        *    message with and without a trailing newline character.
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftfntfmt.c b/src/java.desktop/share/native/libfreetype/src/base/ftfntfmt.c
    index 77b4089e7e2..7f4f14ffdb0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftfntfmt.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftfntfmt.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType utility file for font formats (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftfstype.c b/src/java.desktop/share/native/libfreetype/src/base/ftfstype.c
    index 1565c3b7e25..3a95752ffaa 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftfstype.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftfstype.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType utility file to access FSType data (body).
      *
    - * Copyright (C) 2008-2024 by
    + * Copyright (C) 2008-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftgasp.c b/src/java.desktop/share/native/libfreetype/src/base/ftgasp.c
    index c63d30e978c..2202240b57e 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftgasp.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftgasp.c
    @@ -4,7 +4,7 @@
      *
      *   Access of TrueType's `gasp' table (body).
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftgloadr.c b/src/java.desktop/share/native/libfreetype/src/base/ftgloadr.c
    index 484d98f1722..47781bc4d5c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftgloadr.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftgloadr.c
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph loader (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftglyph.c b/src/java.desktop/share/native/libfreetype/src/base/ftglyph.c
    index 1b5849f99af..6138cfeec2c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftglyph.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftglyph.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType convenience functions to handle glyphs (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -62,7 +62,7 @@
                             FT_GlyphSlot  slot )
       {
         FT_BitmapGlyph  glyph   = (FT_BitmapGlyph)bitmap_glyph;
    -    FT_Error        error   = FT_Err_Ok;
    +    FT_Error        error;
         FT_Library      library = FT_GLYPH( glyph )->library;
     
     
    @@ -75,17 +75,8 @@
         glyph->left = slot->bitmap_left;
         glyph->top  = slot->bitmap_top;
     
    -    /* do lazy copying whenever possible */
    -    if ( slot->internal->flags & FT_GLYPH_OWN_BITMAP )
    -    {
    -      glyph->bitmap          = slot->bitmap;
    -      slot->internal->flags &= ~FT_GLYPH_OWN_BITMAP;
    -    }
    -    else
    -    {
    -      FT_Bitmap_Init( &glyph->bitmap );
    -      error = FT_Bitmap_Copy( library, &slot->bitmap, &glyph->bitmap );
    -    }
    +    FT_Bitmap_Init( &glyph->bitmap );
    +    error = FT_Bitmap_Copy( library, &slot->bitmap, &glyph->bitmap );
     
       Exit:
         return error;
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/fthash.c b/src/java.desktop/share/native/libfreetype/src/base/fthash.c
    index 313bbbb4b27..ab248ace8bd 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/fthash.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/fthash.c
    @@ -41,6 +41,7 @@
     
     #include 
     #include 
    +#include 
     
     
     #define INITIAL_HT_SIZE  241
    @@ -233,7 +234,8 @@
       hash_insert( FT_Hashkey  key,
                    size_t      data,
                    FT_Hash     hash,
    -               FT_Memory   memory )
    +               FT_Memory   memory,
    +               FT_Bool     overwrite )
       {
         FT_Hashnode   nn;
         FT_Hashnode*  bp    = hash_bucket( key, hash );
    @@ -259,7 +261,7 @@
     
           hash->used++;
         }
    -    else
    +    else if ( overwrite )
           nn->data = data;
     
       Exit:
    @@ -278,7 +280,7 @@
     
         hk.str = key;
     
    -    return hash_insert( hk, data, hash, memory );
    +    return hash_insert( hk, data, hash, memory, TRUE );
       }
     
     
    @@ -293,7 +295,37 @@
     
         hk.num = num;
     
    -    return hash_insert( hk, data, hash, memory );
    +    return hash_insert( hk, data, hash, memory, TRUE );
    +  }
    +
    +
    +  FT_Error
    +  ft_hash_str_insert_no_overwrite( const char*  key,
    +                                   size_t       data,
    +                                   FT_Hash      hash,
    +                                   FT_Memory    memory )
    +  {
    +    FT_Hashkey  hk;
    +
    +
    +    hk.str = key;
    +
    +    return hash_insert( hk, data, hash, memory, FALSE );
    +  }
    +
    +
    +  FT_Error
    +  ft_hash_num_insert_no_overwrite( FT_Int     num,
    +                                   size_t     data,
    +                                   FT_Hash    hash,
    +                                   FT_Memory  memory )
    +  {
    +    FT_Hashkey  hk;
    +
    +
    +    hk.num = num;
    +
    +    return hash_insert( hk, data, hash, memory, FALSE );
       }
     
     
    @@ -335,4 +367,68 @@
       }
     
     
    +  FT_Bool
    +  ft_hash_num_iterator( FT_UInt  *idx,
    +                        FT_Int   *key,
    +                        size_t   *value,
    +                        FT_Hash   hash )
    +  {
    +    FT_Hashnode  nn = NULL;
    +
    +
    +    while ( 1 )
    +    {
    +      if ( *idx >= hash->size )
    +        return 0;
    +
    +      nn = hash->table[*idx];
    +      if ( nn )
    +        break;
    +
    +      (*idx)++;
    +    }
    +
    +    if ( key )
    +      *key = nn->key.num;
    +    if ( value )
    +      *value = nn->data;
    +
    +    (*idx)++;
    +
    +    return 1;
    +  }
    +
    +
    +  FT_Bool
    +  ft_hash_str_iterator( FT_UInt      *idx,
    +                        const char*  *key,
    +                        size_t       *value,
    +                        FT_Hash       hash )
    +  {
    +    FT_Hashnode  nn = NULL;
    +
    +
    +    while ( 1 )
    +    {
    +      if ( *idx >= hash->size )
    +        return 0;
    +
    +      nn = hash->table[*idx];
    +      if ( nn )
    +        break;
    +
    +      (*idx)++;
    +    }
    +
    +    if ( key )
    +      *key = nn->key.str;
    +    if ( value )
    +      *value = nn->data;
    +
    +    (*idx)++;
    +
    +    return 1;
    +  }
    +
    +
     /* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftinit.c b/src/java.desktop/share/native/libfreetype/src/base/ftinit.c
    index 9a6c00e13ef..37d7f87bcb9 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftinit.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftinit.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType initialization layer (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftlcdfil.c b/src/java.desktop/share/native/libfreetype/src/base/ftlcdfil.c
    index 1e69d4da70f..51c6fd48a1b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftlcdfil.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftlcdfil.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType API for color filtering of subpixel bitmap glyphs (body).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -25,265 +25,28 @@
     
     #ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
     
    -/* define USE_LEGACY to implement the legacy filter */
    -#define  USE_LEGACY
    -
    -#define FT_SHIFTCLAMP( x )  ( x >>= 8, (FT_Byte)( x > 255 ? 255 : x ) )
    -
    -
    -  /* add padding according to filter weights */
    +  /* add padding sufficient for a 5-tap filter, */
    +  /* which is 2/3 of a pixel                    */
       FT_BASE_DEF( void )
       ft_lcd_padding( FT_BBox*        cbox,
                       FT_GlyphSlot    slot,
                       FT_Render_Mode  mode )
       {
    -    FT_Byte*                 lcd_weights;
    -    FT_Bitmap_LcdFilterFunc  lcd_filter_func;
    +    FT_UNUSED( slot );
     
    -
    -    /* Per-face LCD filtering takes priority if set up. */
    -    if ( slot->face && slot->face->internal->lcd_filter_func )
    +    if ( mode == FT_RENDER_MODE_LCD )
         {
    -      lcd_weights     = slot->face->internal->lcd_weights;
    -      lcd_filter_func = slot->face->internal->lcd_filter_func;
    +      cbox->xMin -= 43;
    +      cbox->xMax += 43;
         }
    -    else
    +    else if ( mode == FT_RENDER_MODE_LCD_V )
         {
    -      lcd_weights     = slot->library->lcd_weights;
    -      lcd_filter_func = slot->library->lcd_filter_func;
    -    }
    -
    -    if ( lcd_filter_func == ft_lcd_filter_fir )
    -    {
    -      if ( mode == FT_RENDER_MODE_LCD )
    -      {
    -        cbox->xMin -= lcd_weights[0] ? 43 :
    -                      lcd_weights[1] ? 22 : 0;
    -        cbox->xMax += lcd_weights[4] ? 43 :
    -                      lcd_weights[3] ? 22 : 0;
    -      }
    -      else if ( mode == FT_RENDER_MODE_LCD_V )
    -      {
    -        cbox->yMin -= lcd_weights[0] ? 43 :
    -                      lcd_weights[1] ? 22 : 0;
    -        cbox->yMax += lcd_weights[4] ? 43 :
    -                      lcd_weights[3] ? 22 : 0;
    -      }
    +      cbox->yMin -= 43;
    +      cbox->yMax += 43;
         }
       }
     
     
    -  /* FIR filter used by the default and light filters */
    -  FT_BASE_DEF( void )
    -  ft_lcd_filter_fir( FT_Bitmap*           bitmap,
    -                     FT_LcdFiveTapFilter  weights )
    -  {
    -    FT_UInt   width  = (FT_UInt)bitmap->width;
    -    FT_UInt   height = (FT_UInt)bitmap->rows;
    -    FT_Int    pitch  = bitmap->pitch;
    -    FT_Byte*  origin = bitmap->buffer;
    -    FT_Byte   mode   = bitmap->pixel_mode;
    -
    -
    -    /* take care of bitmap flow */
    -    if ( pitch > 0 && height > 0 )
    -      origin += pitch * (FT_Int)( height - 1 );
    -
    -    /* horizontal in-place FIR filter */
    -    if ( mode == FT_PIXEL_MODE_LCD && width >= 2 )
    -    {
    -      FT_Byte*  line = origin;
    -
    -
    -      /* `fir' must be at least 32 bit wide, since the sum of */
    -      /* the values in `weights' can exceed 0xFF              */
    -
    -      for ( ; height > 0; height--, line -= pitch )
    -      {
    -        FT_UInt  fir[5];
    -        FT_UInt  val, xx;
    -
    -
    -        val    = line[0];
    -        fir[2] = weights[2] * val;
    -        fir[3] = weights[3] * val;
    -        fir[4] = weights[4] * val;
    -
    -        val    = line[1];
    -        fir[1] = fir[2] + weights[1] * val;
    -        fir[2] = fir[3] + weights[2] * val;
    -        fir[3] = fir[4] + weights[3] * val;
    -        fir[4] =          weights[4] * val;
    -
    -        for ( xx = 2; xx < width; xx++ )
    -        {
    -          val    = line[xx];
    -          fir[0] = fir[1] + weights[0] * val;
    -          fir[1] = fir[2] + weights[1] * val;
    -          fir[2] = fir[3] + weights[2] * val;
    -          fir[3] = fir[4] + weights[3] * val;
    -          fir[4] =          weights[4] * val;
    -
    -          line[xx - 2] = FT_SHIFTCLAMP( fir[0] );
    -        }
    -
    -        line[xx - 2] = FT_SHIFTCLAMP( fir[1] );
    -        line[xx - 1] = FT_SHIFTCLAMP( fir[2] );
    -      }
    -    }
    -
    -    /* vertical in-place FIR filter */
    -    else if ( mode == FT_PIXEL_MODE_LCD_V && height >= 2 )
    -    {
    -      FT_Byte*  column = origin;
    -
    -
    -      for ( ; width > 0; width--, column++ )
    -      {
    -        FT_Byte*  col = column;
    -        FT_UInt   fir[5];
    -        FT_UInt   val, yy;
    -
    -
    -        val    = col[0];
    -        fir[2] = weights[2] * val;
    -        fir[3] = weights[3] * val;
    -        fir[4] = weights[4] * val;
    -        col   -= pitch;
    -
    -        val    = col[0];
    -        fir[1] = fir[2] + weights[1] * val;
    -        fir[2] = fir[3] + weights[2] * val;
    -        fir[3] = fir[4] + weights[3] * val;
    -        fir[4] =          weights[4] * val;
    -        col   -= pitch;
    -
    -        for ( yy = 2; yy < height; yy++, col -= pitch )
    -        {
    -          val    = col[0];
    -          fir[0] = fir[1] + weights[0] * val;
    -          fir[1] = fir[2] + weights[1] * val;
    -          fir[2] = fir[3] + weights[2] * val;
    -          fir[3] = fir[4] + weights[3] * val;
    -          fir[4] =          weights[4] * val;
    -
    -          col[pitch * 2]  = FT_SHIFTCLAMP( fir[0] );
    -        }
    -
    -        col[pitch * 2]  = FT_SHIFTCLAMP( fir[1] );
    -        col[pitch]      = FT_SHIFTCLAMP( fir[2] );
    -      }
    -    }
    -  }
    -
    -
    -#ifdef USE_LEGACY
    -
    -  /* intra-pixel filter used by the legacy filter */
    -  static void
    -  _ft_lcd_filter_legacy( FT_Bitmap*      bitmap,
    -                         FT_Byte*        weights )
    -  {
    -    FT_UInt   width  = (FT_UInt)bitmap->width;
    -    FT_UInt   height = (FT_UInt)bitmap->rows;
    -    FT_Int    pitch  = bitmap->pitch;
    -    FT_Byte*  origin = bitmap->buffer;
    -    FT_Byte   mode   = bitmap->pixel_mode;
    -
    -    static const unsigned int  filters[3][3] =
    -    {
    -      { 65538 * 9/13, 65538 * 1/6, 65538 * 1/13 },
    -      { 65538 * 3/13, 65538 * 4/6, 65538 * 3/13 },
    -      { 65538 * 1/13, 65538 * 1/6, 65538 * 9/13 }
    -    };
    -
    -    FT_UNUSED( weights );
    -
    -
    -    /* take care of bitmap flow */
    -    if ( pitch > 0 && height > 0 )
    -      origin += pitch * (FT_Int)( height - 1 );
    -
    -    /* horizontal in-place intra-pixel filter */
    -    if ( mode == FT_PIXEL_MODE_LCD && width >= 3 )
    -    {
    -      FT_Byte*  line = origin;
    -
    -
    -      for ( ; height > 0; height--, line -= pitch )
    -      {
    -        FT_UInt  xx;
    -
    -
    -        for ( xx = 0; xx < width; xx += 3 )
    -        {
    -          FT_UInt  r, g, b;
    -          FT_UInt  p;
    -
    -
    -          p  = line[xx];
    -          r  = filters[0][0] * p;
    -          g  = filters[0][1] * p;
    -          b  = filters[0][2] * p;
    -
    -          p  = line[xx + 1];
    -          r += filters[1][0] * p;
    -          g += filters[1][1] * p;
    -          b += filters[1][2] * p;
    -
    -          p  = line[xx + 2];
    -          r += filters[2][0] * p;
    -          g += filters[2][1] * p;
    -          b += filters[2][2] * p;
    -
    -          line[xx]     = (FT_Byte)( r / 65536 );
    -          line[xx + 1] = (FT_Byte)( g / 65536 );
    -          line[xx + 2] = (FT_Byte)( b / 65536 );
    -        }
    -      }
    -    }
    -    else if ( mode == FT_PIXEL_MODE_LCD_V && height >= 3 )
    -    {
    -      FT_Byte*  column = origin;
    -
    -
    -      for ( ; width > 0; width--, column++ )
    -      {
    -        FT_Byte*  col = column - 2 * pitch;
    -
    -
    -        for ( ; height > 0; height -= 3, col -= 3 * pitch )
    -        {
    -          FT_UInt  r, g, b;
    -          FT_UInt  p;
    -
    -
    -          p  = col[0];
    -          r  = filters[0][0] * p;
    -          g  = filters[0][1] * p;
    -          b  = filters[0][2] * p;
    -
    -          p  = col[pitch];
    -          r += filters[1][0] * p;
    -          g += filters[1][1] * p;
    -          b += filters[1][2] * p;
    -
    -          p  = col[pitch * 2];
    -          r += filters[2][0] * p;
    -          g += filters[2][1] * p;
    -          b += filters[2][2] * p;
    -
    -          col[0]         = (FT_Byte)( r / 65536 );
    -          col[pitch]     = (FT_Byte)( g / 65536 );
    -          col[pitch * 2] = (FT_Byte)( b / 65536 );
    -        }
    -      }
    -    }
    -  }
    -
    -#endif /* USE_LEGACY */
    -
    -
       /* documentation in ftlcdfil.h */
     
       FT_EXPORT_DEF( FT_Error )
    @@ -297,7 +60,6 @@
           return FT_THROW( Invalid_Argument );
     
         ft_memcpy( library->lcd_weights, weights, FT_LCD_FILTER_FIVE_TAPS );
    -    library->lcd_filter_func = ft_lcd_filter_fir;
     
         return FT_Err_Ok;
       }
    @@ -321,32 +83,23 @@
         switch ( filter )
         {
         case FT_LCD_FILTER_NONE:
    -      library->lcd_filter_func = NULL;
    +      ft_memset( library->lcd_weights,
    +                 0,
    +                 FT_LCD_FILTER_FIVE_TAPS );
           break;
     
         case FT_LCD_FILTER_DEFAULT:
           ft_memcpy( library->lcd_weights,
                      default_weights,
                      FT_LCD_FILTER_FIVE_TAPS );
    -      library->lcd_filter_func = ft_lcd_filter_fir;
           break;
     
         case FT_LCD_FILTER_LIGHT:
           ft_memcpy( library->lcd_weights,
                      light_weights,
                      FT_LCD_FILTER_FIVE_TAPS );
    -      library->lcd_filter_func = ft_lcd_filter_fir;
           break;
     
    -#ifdef USE_LEGACY
    -
    -    case FT_LCD_FILTER_LEGACY:
    -    case FT_LCD_FILTER_LEGACY1:
    -      library->lcd_filter_func = _ft_lcd_filter_legacy;
    -      break;
    -
    -#endif
    -
         default:
           return FT_THROW( Invalid_Argument );
         }
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftmac.c b/src/java.desktop/share/native/libfreetype/src/base/ftmac.c
    index e8e35627b50..37d97be1838 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftmac.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftmac.c
    @@ -8,7 +8,7 @@
      * This file is for Mac OS X only; see builds/mac/ftoldmac.c for
      * classic platforms built by MPW.
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * Just van Rossum, David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftmm.c b/src/java.desktop/share/native/libfreetype/src/base/ftmm.c
    index cc4ca22fba3..9e67001406c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftmm.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftmm.c
    @@ -4,7 +4,7 @@
      *
      *   Multiple Master font support (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -292,6 +292,9 @@
         if ( num_coords && !coords )
           return FT_THROW( Invalid_Argument );
     
    +    if ( !num_coords && !FT_IS_VARIATION( face ) )
    +      return FT_Err_Ok;  /* nothing to be done */
    +
         error = ft_face_get_mm_service( face, &service_mm );
         if ( !error )
         {
    @@ -299,15 +302,21 @@
           if ( service_mm->set_var_design )
             error = service_mm->set_var_design( face, num_coords, coords );
     
    -      if ( !error || error == -1 )
    +      if ( !error || error == -1 || error == -2 )
           {
             FT_Bool  is_variation_old = FT_IS_VARIATION( face );
     
     
    -        if ( num_coords )
    -          face->face_flags |= FT_FACE_FLAG_VARIATION;
    -        else
    -          face->face_flags &= ~FT_FACE_FLAG_VARIATION;
    +        if ( error != -1 )
    +        {
    +          if ( error == -2 ) /* -2 means is_variable. */
    +          {
    +            face->face_flags |= FT_FACE_FLAG_VARIATION;
    +            error             = FT_Err_Ok;
    +          }
    +          else
    +            face->face_flags &= ~FT_FACE_FLAG_VARIATION;
    +        }
     
             if ( service_mm->construct_ps_name )
             {
    @@ -474,15 +483,21 @@
           if ( service_mm->set_mm_blend )
             error = service_mm->set_mm_blend( face, num_coords, coords );
     
    -      if ( !error || error == -1 )
    +      if ( !error || error == -1 || error == -2 )
           {
             FT_Bool  is_variation_old = FT_IS_VARIATION( face );
     
     
    -        if ( num_coords )
    -          face->face_flags |= FT_FACE_FLAG_VARIATION;
    -        else
    -          face->face_flags &= ~FT_FACE_FLAG_VARIATION;
    +        if ( error != -1 )
    +        {
    +          if ( error == -2 ) /* -2 means is_variable. */
    +          {
    +            face->face_flags |= FT_FACE_FLAG_VARIATION;
    +            error             = FT_Err_Ok;
    +          }
    +          else
    +            face->face_flags &= ~FT_FACE_FLAG_VARIATION;
    +        }
     
             if ( service_mm->construct_ps_name )
             {
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftobjs.c b/src/java.desktop/share/native/libfreetype/src/base/ftobjs.c
    index 9b97820c379..323dd5efac2 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftobjs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftobjs.c
    @@ -4,7 +4,7 @@
      *
      *   The FreeType private base classes (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -524,12 +524,28 @@
         bitmap->rows       = (unsigned int)height;
         bitmap->pitch      = pitch;
     
    -    if ( pbox.xMin < -0x8000 || pbox.xMax > 0x7FFF ||
    -         pbox.yMin < -0x8000 || pbox.yMax > 0x7FFF )
    +    /* Flag the bounding box size unsuitable for rendering. */
    +    /* FT_Renderer modules should check the return value.   */
    +    /* The limit is based on the ppem value when available. */
         {
    -      FT_TRACE3(( "ft_glyphslot_preset_bitmap: [%ld %ld %ld %ld]\n",
    -                  pbox.xMin, pbox.yMin, pbox.xMax, pbox.yMax ));
    -      return 1;
    +      FT_Face  face = slot->face;
    +      FT_Pos   xlim = 0x8000;
    +      FT_Pos   ylim = 0x8000;
    +
    +
    +      if ( face )
    +      {
    +        xlim = FT_MIN( xlim, 10 * face->size->metrics.x_ppem );
    +        ylim = FT_MIN( ylim, 10 * face->size->metrics.y_ppem );
    +      }
    +
    +      if ( pbox.xMin < -xlim || pbox.xMax >= xlim ||
    +           pbox.yMin < -ylim || pbox.yMax >= ylim )
    +      {
    +        FT_TRACE3(( "ft_glyphslot_preset_bitmap: [%ld %ld %ld %ld]\n",
    +                    pbox.xMin, pbox.yMin, pbox.xMax, pbox.yMax ));
    +        return 1;
    +      }
         }
     
         return 0;
    @@ -549,8 +565,7 @@
     
     
       FT_BASE_DEF( FT_Error )
    -  ft_glyphslot_alloc_bitmap( FT_GlyphSlot  slot,
    -                             FT_ULong      size )
    +  ft_glyphslot_alloc_bitmap( FT_GlyphSlot  slot )
       {
         FT_Memory  memory = FT_FACE_MEMORY( slot->face );
         FT_Error   error;
    @@ -561,7 +576,10 @@
         else
           slot->internal->flags |= FT_GLYPH_OWN_BITMAP;
     
    -    FT_MEM_ALLOC( slot->bitmap.buffer, size );
    +    /* dimensions must be preset */
    +    FT_MEM_ALLOC_MULT( slot->bitmap.buffer,
    +                       slot->bitmap.rows,
    +                       slot->bitmap.pitch );
         return error;
       }
     
    @@ -905,7 +923,6 @@
         FT_Library    library;
         FT_Bool       autohint = FALSE;
         FT_Module     hinter;
    -    TT_Face       ttface = (TT_Face)face;
     
     
         if ( !face || !face->size || !face->glyph )
    @@ -983,6 +1000,7 @@
           {
             FT_Render_Mode  mode = FT_LOAD_TARGET_MODE( load_flags );
             FT_Bool         is_light_type1;
    +        TT_Face         ttface = (TT_Face)face;
     
     
             /* only the new Adobe engine (for both CFF and Type 1) is `light'; */
    @@ -994,8 +1012,7 @@
             /* the check for `num_locations' assures that we actually    */
             /* test for instructions in a TTF and not in a CFF-based OTF */
             /*                                                           */
    -        /* since `maxSizeOfInstructions' might be unreliable, we     */
    -        /* check the size of the `fpgm' and `prep' tables, too --    */
    +        /* we check the size of the `fpgm' and `prep' tables, too -- */
             /* the assumption is that there don't exist real TTFs where  */
             /* both `fpgm' and `prep' tables are missing                 */
             if ( ( mode == FT_RENDER_MODE_LIGHT           &&
    @@ -1003,9 +1020,8 @@
                      !is_light_type1                    ) )         ||
                  ( FT_IS_SFNT( face )                             &&
                    ttface->num_locations                          &&
    -               ttface->max_profile.maxSizeOfInstructions == 0 &&
                    ttface->font_program_size == 0                 &&
    -               ttface->cvt_program_size == 0                  ) )
    +               ttface->cvt_program_size <= 7                  ) )
               autohint = TRUE;
           }
         }
    @@ -1172,9 +1188,9 @@
         }
     
     #ifdef FT_DEBUG_LEVEL_TRACE
    -    FT_TRACE5(( "FT_Load_Glyph: index %d, flags 0x%x\n",
    +    FT_TRACE5(( "FT_Load_Glyph: index %u, flags 0x%x\n",
                     glyph_index, load_flags ));
    -    FT_TRACE5(( "  bitmap %dx%d %s, %s (mode %d)\n",
    +    FT_TRACE5(( "  bitmap %ux%u %s, %s (mode %d)\n",
                     slot->bitmap.width,
                     slot->bitmap.rows,
                     slot->outline.points ?
    @@ -1253,14 +1269,14 @@
         FT_Driver  driver = (FT_Driver)driver_;
     
     
    -    /* finalize client-specific data */
    -    if ( size->generic.finalizer )
    -      size->generic.finalizer( size );
    -
         /* finalize format-specific stuff */
         if ( driver->clazz->done_size )
           driver->clazz->done_size( size );
     
    +    /* finalize client-specific data */
    +    if ( size->generic.finalizer )
    +      size->generic.finalizer( size );
    +
         FT_FREE( size->internal );
         FT_FREE( size );
       }
    @@ -1322,10 +1338,6 @@
                           driver );
         face->size = NULL;
     
    -    /* now discard client data */
    -    if ( face->generic.finalizer )
    -      face->generic.finalizer( face );
    -
         /* discard charmaps */
         destroy_charmaps( face, memory );
     
    @@ -1340,6 +1352,10 @@
     
         face->stream = NULL;
     
    +    /* now discard client data */
    +    if ( face->generic.finalizer )
    +      face->generic.finalizer( face );
    +
         /* get rid of it */
         if ( face->internal )
         {
    @@ -1359,21 +1375,9 @@
       }
     
     
    -  /**************************************************************************
    -   *
    -   * @Function:
    -   *   find_unicode_charmap
    -   *
    -   * @Description:
    -   *   This function finds a Unicode charmap, if there is one.
    -   *   And if there is more than one, it tries to favour the more
    -   *   extensive one, i.e., one that supports UCS-4 against those which
    -   *   are limited to the BMP (said UCS-2 encoding.)
    -   *
    -   *   This function is called from open_face() (just below), and also
    -   *   from FT_Select_Charmap( ..., FT_ENCODING_UNICODE ).
    -   */
    -  static FT_Error
    +  /* documentation is in ftobjs.h */
    +
    +  FT_BASE_DEF( FT_Error )
       find_unicode_charmap( FT_Face  face )
       {
         FT_CharMap*  first;
    @@ -1427,7 +1431,10 @@
             if ( ( cur[0]->platform_id == TT_PLATFORM_MICROSOFT &&
                    cur[0]->encoding_id == TT_MS_ID_UCS_4        )     ||
                  ( cur[0]->platform_id == TT_PLATFORM_APPLE_UNICODE &&
    -               cur[0]->encoding_id == TT_APPLE_ID_UNICODE_32    ) )
    +               cur[0]->encoding_id == TT_APPLE_ID_UNICODE_32    ) ||
    +             ( cur[0]->platform_id == TT_PLATFORM_APPLE_UNICODE &&
    +               cur[0]->encoding_id == TT_APPLE_ID_FULL_UNICODE  &&
    +               FT_Get_CMap_Format( cur[0] ) == 13               ) )
             {
               face->charmap = cur[0];
               return FT_Err_Ok;
    @@ -2125,7 +2132,7 @@
           if ( pfb_pos > pfb_len || pfb_pos + rlen > pfb_len )
             goto Exit2;
     
    -      FT_TRACE3(( "    Load POST fragment #%d (%ld byte) to buffer"
    +      FT_TRACE3(( "    Load POST fragment #%d (%lu byte) to buffer"
                       " %p + 0x%08lx\n",
                       i, rlen, (void*)pfb_data, pfb_pos ));
     
    @@ -2398,7 +2405,7 @@
           is_darwin_vfs = ft_raccess_rule_by_darwin_vfs( library, i );
           if ( is_darwin_vfs && vfs_rfork_has_no_font )
           {
    -        FT_TRACE3(( "Skip rule %d: darwin vfs resource fork"
    +        FT_TRACE3(( "Skip rule %u: darwin vfs resource fork"
                         " is already checked and"
                         " no font is found\n",
                         i ));
    @@ -2407,7 +2414,7 @@
     
           if ( errors[i] )
           {
    -        FT_TRACE3(( "Error 0x%x has occurred in rule %d\n",
    +        FT_TRACE3(( "Error 0x%x has occurred in rule %u\n",
                         errors[i], i ));
             continue;
           }
    @@ -2415,7 +2422,7 @@
           args2.flags    = FT_OPEN_PATHNAME;
           args2.pathname = file_names[i] ? file_names[i] : args->pathname;
     
    -      FT_TRACE3(( "Try rule %d: %s (offset=%ld) ...",
    +      FT_TRACE3(( "Try rule %u: %s (offset=%ld) ...",
                       i, args2.pathname, offsets[i] ));
     
           error = FT_Stream_New( library, &args2, &stream2 );
    @@ -2812,11 +2819,6 @@
           internal->refcount = 1;
     
           internal->no_stem_darkening = -1;
    -
    -#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
    -      /* Per-face filtering can only be set up by FT_Face_Properties */
    -      internal->lcd_filter_func = NULL;
    -#endif
         }
     
         if ( aface )
    @@ -4046,18 +4048,8 @@
           }
           else if ( properties->tag == FT_PARAM_TAG_LCD_FILTER_WEIGHTS )
           {
    -#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
    -        if ( properties->data )
    -        {
    -          ft_memcpy( face->internal->lcd_weights,
    -                     properties->data,
    -                     FT_LCD_FILTER_FIVE_TAPS );
    -          face->internal->lcd_filter_func = ft_lcd_filter_fir;
    -        }
    -#else
             error = FT_THROW( Unimplemented_Feature );
             goto Exit;
    -#endif
           }
           else if ( properties->tag == FT_PARAM_TAG_RANDOM_SEED )
           {
    @@ -5044,9 +5036,9 @@
       static void
       Destroy_Module( FT_Module  module )
       {
    -    FT_Memory         memory  = module->memory;
    -    FT_Module_Class*  clazz   = module->clazz;
    -    FT_Library        library = module->library;
    +    const FT_Module_Class*  clazz   = module->clazz;
    +    FT_Library              library = module->library;
    +    FT_Memory               memory  = module->memory;
     
     
         if ( library && library->auto_hinter == module )
    @@ -5125,9 +5117,9 @@
           goto Exit;
     
         /* base initialization */
    +    module->clazz   = clazz;
         module->library = library;
         module->memory  = memory;
    -    module->clazz   = (FT_Module_Class*)clazz;
     
         /* check whether the module is a renderer - this must be performed */
         /* before the normal module initialization                         */
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftoutln.c b/src/java.desktop/share/native/libfreetype/src/base/ftoutln.c
    index ef699b3c7cd..8a15b03eb83 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftoutln.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftoutln.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType outline management (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftpatent.c b/src/java.desktop/share/native/libfreetype/src/base/ftpatent.c
    index 2055757e023..664bc34deea 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftpatent.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftpatent.c
    @@ -5,7 +5,7 @@
      *   FreeType API for checking patented TrueType bytecode instructions
      *   (body).  Obsolete, retained for backward compatibility.
      *
    - * Copyright (C) 2007-2024 by
    + * Copyright (C) 2007-2025 by
      * David Turner.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftpsprop.c b/src/java.desktop/share/native/libfreetype/src/base/ftpsprop.c
    index 37a6cee6cc9..0631cd63f62 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftpsprop.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftpsprop.c
    @@ -5,7 +5,7 @@
      *   Get and set properties of PostScript drivers (body).
      *   See `ftdriver.h' for available properties.
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftrfork.c b/src/java.desktop/share/native/libfreetype/src/base/ftrfork.c
    index dc9b043d8bb..1e241f4f95b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftrfork.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftrfork.c
    @@ -4,7 +4,7 @@
      *
      *   Embedded resource forks accessor (body).
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * Masatake YAMATO and Redhat K.K.
      *
      * FT_Raccess_Get_HeaderInfo() and raccess_guess_darwin_hfsplus() are
    @@ -269,14 +269,8 @@
                * According to Inside Macintosh: More Macintosh Toolbox,
                * "Resource IDs" (1-46), there are some reserved IDs.
                * However, FreeType2 is not a font synthesizer, no need
    -           * to check the acceptable resource ID.
    +           * to check the acceptable resource ID or its attributes.
                */
    -          if ( temp < 0 )
    -          {
    -            error = FT_THROW( Invalid_Table );
    -            goto Exit;
    -          }
    -
               ref[j].offset = temp & 0xFFFFFFL;
     
               FT_TRACE3(( "             [%d]:"
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftsnames.c b/src/java.desktop/share/native/libfreetype/src/base/ftsnames.c
    index f7231fd61cc..34a67a148fc 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftsnames.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftsnames.c
    @@ -7,7 +7,7 @@
      *
      *   This is _not_ used to retrieve glyph names!
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftstream.c b/src/java.desktop/share/native/libfreetype/src/base/ftstream.c
    index 66722246128..c04a0506def 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftstream.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftstream.c
    @@ -4,7 +4,7 @@
      *
      *   I/O stream support (body).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -242,7 +242,7 @@
         FT_ULong  read_bytes;
     
     
    -    FT_TRACE7(( "FT_Stream_EnterFrame: %ld bytes\n", count ));
    +    FT_TRACE7(( "FT_Stream_EnterFrame: %lu bytes\n", count ));
     
         /* check for nested frame access */
         FT_ASSERT( stream && stream->cursor == 0 );
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftstroke.c b/src/java.desktop/share/native/libfreetype/src/base/ftstroke.c
    index 64f46ce43e7..591f18eaa83 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftstroke.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftstroke.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType path stroker (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -1070,7 +1070,7 @@
             if ( theta == FT_ANGLE_PI2 )
               theta = -rotate;
     
    -        phi    = stroker->angle_in + theta + rotate;
    +        phi = stroker->angle_in + theta + rotate;
     
             FT_Vector_From_Polar( &sigma, stroker->miter_limit, theta );
     
    @@ -1371,7 +1371,7 @@
         arc[1] = *control;
         arc[2] = stroker->center;
     
    -    while ( arc >= bez_stack )
    +    do
         {
           FT_Angle  angle_in, angle_out;
     
    @@ -1524,10 +1524,12 @@
             }
           }
     
    -      arc -= 2;
    -
           stroker->angle_in = angle_out;
    -    }
    +
    +      if ( arc == bez_stack )
    +        break;
    +      arc -= 2;
    +    } while ( 1 );
     
         stroker->center      = *to;
         stroker->line_length = 0;
    @@ -1577,7 +1579,7 @@
         arc[2] = *control1;
         arc[3] = stroker->center;
     
    -    while ( arc >= bez_stack )
    +    do
         {
           FT_Angle  angle_in, angle_mid, angle_out;
     
    @@ -1741,10 +1743,12 @@
             }
           }
     
    -      arc -= 3;
    -
           stroker->angle_in = angle_out;
    -    }
    +
    +      if ( arc == bez_stack )
    +        break;
    +      arc -= 3;
    +    } while ( 1 );
     
         stroker->center      = *to;
         stroker->line_length = 0;
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftsynth.c b/src/java.desktop/share/native/libfreetype/src/base/ftsynth.c
    index ec05bce33a9..08bc1742202 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftsynth.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftsynth.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType synthesizing code for emboldening and slanting (body).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -141,7 +141,7 @@
           /*
            * XXX: overflow check for 16-bit system, for compatibility
            *      with FT_GlyphSlot_Embolden() since FreeType 2.1.10.
    -       *      unfortunately, this function return no informations
    +       *      unfortunately, this function returns no information
            *      about the cause of error.
            */
           if ( ( ystr >> 6 ) > FT_INT_MAX || ( ystr >> 6 ) < FT_INT_MIN )
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftsystem.c b/src/java.desktop/share/native/libfreetype/src/base/ftsystem.c
    index eee3642334f..186119d5581 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftsystem.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftsystem.c
    @@ -4,7 +4,7 @@
      *
      *   ANSI-specific FreeType low-level system interface (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -280,7 +280,7 @@
         stream->close = ft_ansi_stream_close;
     
         FT_TRACE1(( "FT_Stream_Open:" ));
    -    FT_TRACE1(( " opened `%s' (%ld bytes) successfully\n",
    +    FT_TRACE1(( " opened `%s' (%lu bytes) successfully\n",
                     filepathname, stream->size ));
     
         return FT_Err_Ok;
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/fttrigon.c b/src/java.desktop/share/native/libfreetype/src/base/fttrigon.c
    index 4b1aced1cba..29eff639c51 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/fttrigon.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/fttrigon.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType trigonometric functions (body).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/fttype1.c b/src/java.desktop/share/native/libfreetype/src/base/fttype1.c
    index cedf7c40505..77978df674d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/fttype1.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/fttype1.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType utility file for PS names support (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/base/ftutil.c b/src/java.desktop/share/native/libfreetype/src/base/ftutil.c
    index b13512f8704..f83c4394893 100644
    --- a/src/java.desktop/share/native/libfreetype/src/base/ftutil.c
    +++ b/src/java.desktop/share/native/libfreetype/src/base/ftutil.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType utility file for memory and list management (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -424,11 +424,10 @@
         while ( cur )
         {
           FT_ListNode  next = cur->next;
    -      void*        data = cur->data;
     
     
           if ( destroy )
    -        destroy( memory, data, user );
    +        destroy( memory, cur->data, user );
     
           FT_FREE( cur );
           cur = next;
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.c b/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.c
    index ea5f8ed2885..cb69abdb90f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.c
    @@ -4,7 +4,7 @@
      *
      *   CFF character mapping table (cmap) support (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.h b/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.h
    index 1dd8700cd8b..60e16d94875 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffcmap.h
    @@ -4,7 +4,7 @@
      *
      *   CFF character mapping table (cmap) support (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.c b/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.c
    index f6ebdb3810a..44ff44aecbd 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.c
    @@ -4,7 +4,7 @@
      *
      *   OpenType font driver implementation (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, and Dominik Röttsches.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -121,7 +121,20 @@
         kerning->y = 0;
     
         if ( sfnt )
    -      kerning->x = sfnt->get_kerning( cffface, left_glyph, right_glyph );
    +    {
    +      /* Use 'kern' table if available since that can be faster; otherwise */
    +      /* use GPOS kerning pairs if available.                              */
    +      if ( cffface->kern_avail_bits )
    +        kerning->x = sfnt->get_kerning( cffface,
    +                                        left_glyph,
    +                                        right_glyph );
    +#ifdef TT_CONFIG_OPTION_GPOS_KERNING
    +      else if ( cffface->num_gpos_lookups_kerning )
    +        kerning->x = sfnt->get_gpos_kerning( cffface,
    +                                             left_glyph,
    +                                             right_glyph );
    +#endif
    +    }
     
         return FT_Err_Ok;
       }
    @@ -168,25 +181,7 @@
         CFF_Size       cffsize = (CFF_Size)size;
     
     
    -    if ( !cffslot )
    -      return FT_THROW( Invalid_Slot_Handle );
    -
    -    FT_TRACE1(( "cff_glyph_load: glyph index %d\n", glyph_index ));
    -
    -    /* check whether we want a scaled outline or bitmap */
    -    if ( !cffsize )
    -      load_flags |= FT_LOAD_NO_SCALE | FT_LOAD_NO_HINTING;
    -
    -    /* reset the size object if necessary */
    -    if ( load_flags & FT_LOAD_NO_SCALE )
    -      size = NULL;
    -
    -    if ( size )
    -    {
    -      /* these two objects must have the same parent */
    -      if ( size->face != slot->face )
    -        return FT_THROW( Invalid_Face_Handle );
    -    }
    +    FT_TRACE1(( "cff_glyph_load: glyph index %u\n", glyph_index ));
     
         /* now load the glyph outline if necessary */
         error = cff_slot_load( cffslot, cffsize, glyph_index, load_flags );
    @@ -205,105 +200,70 @@
                         FT_Int32   flags,
                         FT_Fixed*  advances )
       {
    -    FT_UInt       nn;
    -    FT_Error      error = FT_Err_Ok;
    -    FT_GlyphSlot  slot  = face->glyph;
    +    CFF_Face  cffface = (CFF_Face)face;
    +    FT_Bool   horz;
    +    FT_UInt   nn;
     
     
    -    if ( FT_IS_SFNT( face ) )
    +    if ( !FT_IS_SFNT( face ) )
    +      return FT_THROW( Unimplemented_Feature );
    +
    +    horz = !( flags & FT_LOAD_VERTICAL_LAYOUT );
    +
    +    if ( horz )
         {
           /* OpenType 1.7 mandates that the data from `hmtx' table be used; */
           /* it is no longer necessary that those values are identical to   */
           /* the values in the `CFF' table                                  */
    +      if ( !cffface->horizontal.number_Of_HMetrics )
    +        return FT_THROW( Unimplemented_Feature );
     
    -      CFF_Face  cffface = (CFF_Face)face;
    -      FT_Short  dummy;
    -
    -
    -      if ( flags & FT_LOAD_VERTICAL_LAYOUT )
    -      {
     #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
    -        /* no fast retrieval for blended MM fonts without VVAR table */
    -        if ( ( FT_IS_NAMED_INSTANCE( face ) || FT_IS_VARIATION( face ) ) &&
    -             !( cffface->variation_support & TT_FACE_FLAG_VAR_VADVANCE ) )
    -          return FT_THROW( Unimplemented_Feature );
    +      /* no fast retrieval for blended MM fonts without HVAR table */
    +      if ( ( FT_IS_NAMED_INSTANCE( face ) || FT_IS_VARIATION( face ) ) &&
    +           !( cffface->variation_support & TT_FACE_FLAG_VAR_HADVANCE ) )
    +        return FT_THROW( Unimplemented_Feature );
     #endif
    +    }
    +    else  /* vertical */
    +    {
    +      /* check whether we have data from the `vmtx' table at all; */
    +      /* otherwise we extract the info from the CFF glyphstrings  */
    +      /* (instead of synthesizing a global value using the `OS/2' */
    +      /* table)                                                   */
    +      if ( !cffface->vertical_info )
    +        return FT_THROW( Unimplemented_Feature );
     
    -        /* check whether we have data from the `vmtx' table at all; */
    -        /* otherwise we extract the info from the CFF glyphstrings  */
    -        /* (instead of synthesizing a global value using the `OS/2' */
    -        /* table)                                                   */
    -        if ( !cffface->vertical_info )
    -          goto Missing_Table;
    -
    -        for ( nn = 0; nn < count; nn++ )
    -        {
    -          FT_UShort  ah;
    -
    -
    -          ( (SFNT_Service)cffface->sfnt )->get_metrics( cffface,
    -                                                        1,
    -                                                        start + nn,
    -                                                        &dummy,
    -                                                        &ah );
    -
    -          FT_TRACE5(( "  idx %d: advance height %d font unit%s\n",
    -                      start + nn,
    -                      ah,
    -                      ah == 1 ? "" : "s" ));
    -          advances[nn] = ah;
    -        }
    -      }
    -      else
    -      {
     #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
    -        /* no fast retrieval for blended MM fonts without HVAR table */
    -        if ( ( FT_IS_NAMED_INSTANCE( face ) || FT_IS_VARIATION( face ) ) &&
    -             !( cffface->variation_support & TT_FACE_FLAG_VAR_HADVANCE ) )
    -          return FT_THROW( Unimplemented_Feature );
    +      /* no fast retrieval for blended MM fonts without VVAR table */
    +      if ( ( FT_IS_NAMED_INSTANCE( face ) || FT_IS_VARIATION( face ) ) &&
    +           !( cffface->variation_support & TT_FACE_FLAG_VAR_VADVANCE ) )
    +        return FT_THROW( Unimplemented_Feature );
     #endif
    -
    -        /* check whether we have data from the `hmtx' table at all */
    -        if ( !cffface->horizontal.number_Of_HMetrics )
    -          goto Missing_Table;
    -
    -        for ( nn = 0; nn < count; nn++ )
    -        {
    -          FT_UShort  aw;
    -
    -
    -          ( (SFNT_Service)cffface->sfnt )->get_metrics( cffface,
    -                                                        0,
    -                                                        start + nn,
    -                                                        &dummy,
    -                                                        &aw );
    -
    -          FT_TRACE5(( "  idx %d: advance width %d font unit%s\n",
    -                      start + nn,
    -                      aw,
    -                      aw == 1 ? "" : "s" ));
    -          advances[nn] = aw;
    -        }
    -      }
    -
    -      return error;
         }
     
    -  Missing_Table:
    -    flags |= (FT_UInt32)FT_LOAD_ADVANCE_ONLY;
    -
    +    /* proceed to fast advances */
         for ( nn = 0; nn < count; nn++ )
         {
    -      error = cff_glyph_load( slot, face->size, start + nn, flags );
    -      if ( error )
    -        break;
    +      FT_UShort  aw;
    +      FT_Short   dummy;
     
    -      advances[nn] = ( flags & FT_LOAD_VERTICAL_LAYOUT )
    -                     ? slot->linearVertAdvance
    -                     : slot->linearHoriAdvance;
    +
    +      ( (SFNT_Service)cffface->sfnt )->get_metrics( cffface,
    +                                                    !horz,
    +                                                    start + nn,
    +                                                    &dummy,
    +                                                    &aw );
    +
    +      FT_TRACE5(( "  idx %u: advance %s %d font unit%s\n",
    +                  start + nn,
    +                  horz ? "width" : "height",
    +                  aw,
    +                  aw == 1 ? "" : "s" ));
    +      advances[nn] = aw;
         }
     
    -    return error;
    +    return FT_Err_Ok;
       }
     
     
    @@ -496,8 +456,8 @@
                                                              dict->weight );
           font_info->italic_angle        = dict->italic_angle;
           font_info->is_fixed_pitch      = dict->is_fixed_pitch;
    -      font_info->underline_position  = (FT_Short)dict->underline_position;
    -      font_info->underline_thickness = (FT_UShort)dict->underline_thickness;
    +      font_info->underline_position  = dict->underline_position;
    +      font_info->underline_thickness = dict->underline_thickness;
     
           cff->font_info = font_info;
         }
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.h b/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.h
    index fd5bc37ecd4..52a1e727a6a 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffdrivr.h
    @@ -4,7 +4,7 @@
      *
      *   High-level OpenType driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cfferrs.h b/src/java.desktop/share/native/libfreetype/src/cff/cfferrs.h
    index 128adc3b716..7491886c7be 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cfferrs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cfferrs.h
    @@ -4,7 +4,7 @@
      *
      *   CFF error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffgload.c b/src/java.desktop/share/native/libfreetype/src/cff/cffgload.c
    index cbb071abdfe..e8bab3c1e33 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffgload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffgload.c
    @@ -4,7 +4,7 @@
      *
      *   OpenType Glyph Loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -238,24 +238,12 @@
         else if ( glyph_index >= cff->num_glyphs )
           return FT_THROW( Invalid_Argument );
     
    -    if ( load_flags & FT_LOAD_NO_RECURSE )
    -      load_flags |= FT_LOAD_NO_SCALE | FT_LOAD_NO_HINTING;
    -
    -    glyph->x_scale = 0x10000L;
    -    glyph->y_scale = 0x10000L;
    -    if ( size )
    -    {
    -      glyph->x_scale = size->root.metrics.x_scale;
    -      glyph->y_scale = size->root.metrics.y_scale;
    -    }
    -
     #ifdef TT_CONFIG_OPTION_EMBEDDED_BITMAPS
     
         /* try to load embedded bitmap if any              */
         /*                                                 */
         /* XXX: The convention should be emphasized in     */
         /*      the documents because it can be confusing. */
    -    if ( size )
         {
           CFF_Face      cff_face = (CFF_Face)size->root.face;
           SFNT_Service  sfnt     = (SFNT_Service)cff_face->sfnt;
    @@ -284,9 +272,6 @@
               FT_Short   dummy;
     
     
    -          glyph->root.outline.n_points   = 0;
    -          glyph->root.outline.n_contours = 0;
    -
               glyph->root.metrics.width  = (FT_Pos)metrics.width  * 64;
               glyph->root.metrics.height = (FT_Pos)metrics.height * 64;
     
    @@ -423,6 +408,25 @@
     
     #endif /* FT_CONFIG_OPTION_SVG */
     
    +    /* top-level code ensures that FT_LOAD_NO_HINTING is set */
    +    /* if FT_LOAD_NO_SCALE is active                         */
    +    hinting = FT_BOOL( ( load_flags & FT_LOAD_NO_HINTING ) == 0 );
    +    scaled  = FT_BOOL( ( load_flags & FT_LOAD_NO_SCALE   ) == 0 );
    +
    +    glyph->hint        = hinting;
    +    glyph->scaled      = scaled;
    +
    +    if ( scaled )
    +    {
    +      glyph->x_scale = size->root.metrics.x_scale;
    +      glyph->y_scale = size->root.metrics.y_scale;
    +    }
    +    else
    +    {
    +      glyph->x_scale = 0x10000L;
    +      glyph->y_scale = 0x10000L;
    +    }
    +
         /* if we have a CID subfont, use its matrix (which has already */
         /* been multiplied with the root matrix)                       */
     
    @@ -457,18 +461,6 @@
           font_offset = cff->top_font.font_dict.font_offset;
         }
     
    -    glyph->root.outline.n_points   = 0;
    -    glyph->root.outline.n_contours = 0;
    -
    -    /* top-level code ensures that FT_LOAD_NO_HINTING is set */
    -    /* if FT_LOAD_NO_SCALE is active                         */
    -    hinting = FT_BOOL( ( load_flags & FT_LOAD_NO_HINTING ) == 0 );
    -    scaled  = FT_BOOL( ( load_flags & FT_LOAD_NO_SCALE   ) == 0 );
    -
    -    glyph->hint        = hinting;
    -    glyph->scaled      = scaled;
    -    glyph->root.format = FT_GLYPH_FORMAT_OUTLINE;  /* by default */
    -
         {
     #ifdef CFF_CONFIG_OPTION_OLD_ENGINE
           PS_Driver  driver = (PS_Driver)FT_FACE_DRIVER( face );
    @@ -602,10 +594,8 @@
         {
           /* Now, set the metrics -- this is rather simple, as   */
           /* the left side bearing is the xMin, and the top side */
    -      /* bearing the yMax.                                   */
    -
    -      /* For composite glyphs, return only left side bearing and */
    -      /* advance width.                                          */
    +      /* bearing the yMax. For composite glyphs, return only */
    +      /* left side bearing and advance width.                */
           if ( load_flags & FT_LOAD_NO_RECURSE )
           {
             FT_Slot_Internal  internal = glyph->root.internal;
    @@ -624,6 +614,12 @@
             FT_Bool            has_vertical_info;
     
     
    +        glyph->root.format = FT_GLYPH_FORMAT_OUTLINE;
    +
    +        glyph->root.outline.flags = FT_OUTLINE_REVERSE_FILL;
    +        if ( size && size->root.metrics.y_ppem < 24 )
    +          glyph->root.outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    +
             if ( face->horizontal.number_Of_HMetrics )
             {
               FT_Short   horiBearingX = 0;
    @@ -677,14 +673,6 @@
     
             glyph->root.linearVertAdvance = metrics->vertAdvance;
     
    -        glyph->root.format = FT_GLYPH_FORMAT_OUTLINE;
    -
    -        glyph->root.outline.flags = 0;
    -        if ( size && size->root.metrics.y_ppem < 24 )
    -          glyph->root.outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    -
    -        glyph->root.outline.flags |= FT_OUTLINE_REVERSE_FILL;
    -
             /* apply the font matrix, if any */
             if ( font_matrix.xx != 0x10000L || font_matrix.yy != 0x10000L ||
                  font_matrix.xy != 0        || font_matrix.yx != 0        )
    @@ -707,7 +695,7 @@
               metrics->vertAdvance += font_offset.y;
             }
     
    -        if ( ( load_flags & FT_LOAD_NO_SCALE ) == 0 || force_scaling )
    +        if ( scaled || force_scaling )
             {
               /* scale the outline and the metrics */
               FT_Int       n;
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffgload.h b/src/java.desktop/share/native/libfreetype/src/cff/cffgload.h
    index 346d4b11c31..662bb7cff53 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffgload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffgload.h
    @@ -4,7 +4,7 @@
      *
      *   OpenType Glyph Loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffload.c b/src/java.desktop/share/native/libfreetype/src/cff/cffload.c
    index 979fd45f6ca..39d662eb434 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffload.c
    @@ -4,7 +4,7 @@
      *
      *   OpenType and CFF data/program tables loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -442,7 +442,7 @@
           if ( cur_offset != 0 )
           {
             FT_TRACE0(( "cff_index_get_pointers:"
    -                    " invalid first offset value %ld set to zero\n",
    +                    " invalid first offset value %lu set to zero\n",
                         cur_offset ));
             cur_offset = 0;
           }
    @@ -559,8 +559,8 @@
                idx->data_offset > stream->size - off2 + 1 )
           {
             FT_ERROR(( "cff_index_access_element:"
    -                   " offset to next entry (%ld)"
    -                   " exceeds the end of stream (%ld)\n",
    +                   " offset to next entry (%lu)"
    +                   " exceeds the end of stream (%lu)\n",
                        off2, stream->size - idx->data_offset + 1 ));
             off2 = stream->size - idx->data_offset + 1;
           }
    @@ -982,7 +982,7 @@
                 if ( glyph_sid > 0xFFFFL - nleft )
                 {
                   FT_ERROR(( "cff_charset_load: invalid SID range trimmed"
    -                         " nleft=%d -> %ld\n", nleft, 0xFFFFL - glyph_sid ));
    +                         " nleft=%u -> %ld\n", nleft, 0xFFFFL - glyph_sid ));
                   nleft = ( FT_UInt )( 0xFFFFL - glyph_sid );
                 }
     
    @@ -1315,7 +1315,7 @@
     
         if ( numOperands > count )
         {
    -      FT_TRACE4(( " cff_blend_doBlend: Stack underflow %d argument%s\n",
    +      FT_TRACE4(( " cff_blend_doBlend: Stack underflow %u argument%s\n",
                       count,
                       count == 1 ? "" : "s" ));
     
    @@ -1466,7 +1466,7 @@
           if ( master == 0 )
           {
             blend->BV[master] = FT_FIXED_ONE;
    -        FT_TRACE4(( "   build blend vector len %d\n", len ));
    +        FT_TRACE4(( "   build blend vector len %u\n", len ));
             FT_TRACE4(( "   [ %f ", blend->BV[master] / 65536.0 ));
             continue;
           }
    @@ -2014,8 +2014,8 @@
         /* set defaults */
         FT_ZERO( top );
     
    -    top->underline_position  = -( 100L << 16 );
    -    top->underline_thickness = 50L << 16;
    +    top->underline_position  = -100;
    +    top->underline_thickness = 50;
         top->charstring_type     = 2;
         top->font_matrix.xx      = 0x10000L;
         top->font_matrix.yy      = 0x10000L;
    @@ -2341,7 +2341,7 @@
           if ( face_index > 0 && subfont_index >= font->name_index.count )
           {
             FT_ERROR(( "cff_font_load:"
    -                   " invalid subfont index for pure CFF font (%d)\n",
    +                   " invalid subfont index for pure CFF font (%u)\n",
                        subfont_index ));
             error = FT_THROW( Invalid_Argument );
             goto Exit;
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffload.h b/src/java.desktop/share/native/libfreetype/src/cff/cffload.h
    index 02209245421..fdc132c8f3f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffload.h
    @@ -4,7 +4,7 @@
      *
      *   OpenType & CFF data/program tables loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.c b/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.c
    index 7c6713739a1..9e00943a95d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.c
    @@ -4,7 +4,7 @@
      *
      *   OpenType objects manager (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -537,8 +537,8 @@
     
           sfnt_format = 1;
     
    -      /* now, the font can be either an OpenType/CFF font, or an SVG CEF */
    -      /* font; in the latter case it doesn't have a `head' table         */
    +      /* the font may be OpenType/CFF, SVG CEF, or sfnt/CFF; a `head' table */
    +      /* implies OpenType/CFF, otherwise just look for an optional cmap     */
           error = face->goto_table( face, TTAG_head, stream, 0 );
           if ( !error )
           {
    @@ -554,7 +554,9 @@
           {
             /* load the `cmap' table explicitly */
             error = sfnt->load_cmap( face, stream );
    -        if ( error )
    +
    +        /* this may fail because CID-keyed fonts don't have a cmap */
    +        if ( FT_ERR_NEQ( error, Table_Missing ) && FT_ERR_NEQ( error, Ok ) )
               goto Exit;
           }
     
    @@ -651,7 +653,7 @@
             {
               s = cff_index_get_sid_string( cff, idx );
               if ( s )
    -            FT_TRACE4(( "  %5d %s\n", idx, s ));
    +            FT_TRACE4(( "  %5u %s\n", idx, s ));
             }
     
             /* In Multiple Master CFFs, two SIDs hold the Normalize Design  */
    @@ -666,7 +668,7 @@
               FT_PtrDist  l;
     
     
    -          FT_TRACE4(( "  %5d ", idx + 390 ));
    +          FT_TRACE4(( "  %5u ", idx + 390 ));
               for ( l = 0; l < s1len; l++ )
                 FT_TRACE4(( "%c", s1[l] ));
               FT_TRACE4(( "\n" ));
    @@ -681,7 +683,7 @@
               FT_PtrDist  l;
     
     
    -          FT_TRACE4(( "  %5d ", cff->num_strings + 390 ));
    +          FT_TRACE4(( "  %5u ", cff->num_strings + 390 ));
               for ( l = 0; l < s1len; l++ )
                 FT_TRACE4(( "%c", s1[l] ));
               FT_TRACE4(( "\n" ));
    @@ -844,10 +846,8 @@
               cffface->height = (FT_Short)( cffface->ascender -
                                             cffface->descender );
     
    -        cffface->underline_position  =
    -          (FT_Short)( dict->underline_position >> 16 );
    -        cffface->underline_thickness =
    -          (FT_Short)( dict->underline_thickness >> 16 );
    +        cffface->underline_position  = (FT_Short)dict->underline_position;
    +        cffface->underline_thickness = (FT_Short)dict->underline_thickness;
     
             /* retrieve font family & style name */
             if ( dict->family_name )
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.h b/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.h
    index 91ad83b1cd0..982dcd64dd0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffobjs.h
    @@ -4,7 +4,7 @@
      *
      *   OpenType objects manager (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffparse.c b/src/java.desktop/share/native/libfreetype/src/cff/cffparse.c
    index 92a69c3b516..864b2490b3b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffparse.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffparse.c
    @@ -4,7 +4,7 @@
      *
      *   CFF token stream parser (body)
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -892,7 +892,7 @@
                        dict->cid_supplement ));
           error = FT_Err_Ok;
     
    -      FT_TRACE4(( " %d %d %ld\n",
    +      FT_TRACE4(( " %u %u %ld\n",
                       dict->cid_registry,
                       dict->cid_ordering,
                       dict->cid_supplement ));
    @@ -929,7 +929,7 @@
     
         priv->vsindex = (FT_UInt)cff_parse_num( parser, data++ );
     
    -    FT_TRACE4(( " %d\n", priv->vsindex ));
    +    FT_TRACE4(( " %u\n", priv->vsindex ));
     
         error = FT_Err_Ok;
     
    @@ -979,7 +979,7 @@
           goto Exit;
         }
     
    -    FT_TRACE4(( "   %d value%s blended\n",
    +    FT_TRACE4(( "   %u value%s blended\n",
                     numBlends,
                     numBlends == 1 ? "" : "s" ));
     
    @@ -1014,7 +1014,7 @@
         if ( dict->maxstack < CFF2_DEFAULT_STACK )
           dict->maxstack = CFF2_DEFAULT_STACK;
     
    -    FT_TRACE4(( " %d\n", dict->maxstack ));
    +    FT_TRACE4(( " %u\n", dict->maxstack ));
     
       Exit:
         return error;
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cffparse.h b/src/java.desktop/share/native/libfreetype/src/cff/cffparse.h
    index ca6b18af6aa..47cceb1a4a0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cffparse.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cffparse.h
    @@ -4,7 +4,7 @@
      *
      *   CFF token stream parser (specification)
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cff/cfftoken.h b/src/java.desktop/share/native/libfreetype/src/cff/cfftoken.h
    index da45faa7f4e..a7ee1cb3fe7 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cff/cfftoken.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cff/cfftoken.h
    @@ -4,7 +4,7 @@
      *
      *   CFF token definitions (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -30,8 +30,8 @@
       CFF_FIELD_STRING  ( 4,     weight,              "Weight" )
       CFF_FIELD_BOOL    ( 0x101, is_fixed_pitch,      "isFixedPitch" )
       CFF_FIELD_FIXED   ( 0x102, italic_angle,        "ItalicAngle" )
    -  CFF_FIELD_FIXED   ( 0x103, underline_position,  "UnderlinePosition" )
    -  CFF_FIELD_FIXED   ( 0x104, underline_thickness, "UnderlineThickness" )
    +  CFF_FIELD_NUM     ( 0x103, underline_position,  "UnderlinePosition" )
    +  CFF_FIELD_NUM     ( 0x104, underline_thickness, "UnderlineThickness" )
       CFF_FIELD_NUM     ( 0x105, paint_type,          "PaintType" )
       CFF_FIELD_NUM     ( 0x106, charstring_type,     "CharstringType" )
       CFF_FIELD_CALLBACK( 0x107, font_matrix,         "FontMatrix" )
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/ciderrs.h b/src/java.desktop/share/native/libfreetype/src/cid/ciderrs.h
    index c439a8c4a0b..1591979d370 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/ciderrs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/ciderrs.h
    @@ -4,7 +4,7 @@
      *
      *   CID error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidgload.c b/src/java.desktop/share/native/libfreetype/src/cid/cidgload.c
    index 7b571322d45..249ede5757d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidgload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidgload.c
    @@ -4,7 +4,7 @@
      *
      *   CID-keyed Type1 Glyph Loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -103,20 +103,20 @@
           if ( ( cid->fd_bytes == 1 && fd_select == 0xFFU   ) ||
                ( cid->fd_bytes == 2 && fd_select == 0xFFFFU ) )
           {
    -        FT_TRACE1(( "cid_load_glyph: fail for glyph index %d:\n",
    +        FT_TRACE1(( "cid_load_glyph: fail for glyph index %u:\n",
                         glyph_index ));
    -        FT_TRACE1(( "                FD number %ld is the maximum\n",
    +        FT_TRACE1(( "                FD number %lu is the maximum\n",
                         fd_select ));
    -        FT_TRACE1(( "                integer fitting into %d byte%s\n",
    +        FT_TRACE1(( "                integer fitting into %u byte%s\n",
                         cid->fd_bytes, cid->fd_bytes == 1 ? "" : "s" ));
           }
           else
           {
    -        FT_TRACE0(( "cid_load_glyph: fail for glyph index %d:\n",
    +        FT_TRACE0(( "cid_load_glyph: fail for glyph index %u:\n",
                         glyph_index ));
    -        FT_TRACE0(( "                FD number %ld is larger\n",
    +        FT_TRACE0(( "                FD number %lu is larger\n",
                         fd_select ));
    -        FT_TRACE0(( "                than number of dictionaries (%d)\n",
    +        FT_TRACE0(( "                than number of dictionaries (%u)\n",
                         cid->num_dicts ));
           }
     
    @@ -125,7 +125,7 @@
         }
         else if ( off2 > stream->size )
         {
    -      FT_TRACE0(( "cid_load_glyph: fail for glyph index %d:\n",
    +      FT_TRACE0(( "cid_load_glyph: fail for glyph index %u:\n",
                       glyph_index ));
           FT_TRACE0(( "               end of the glyph data\n" ));
           FT_TRACE0(( "               is beyond the data stream\n" ));
    @@ -135,7 +135,7 @@
         }
         else if ( off1 > off2 )
         {
    -      FT_TRACE0(( "cid_load_glyph: fail for glyph index %d:\n",
    +      FT_TRACE0(( "cid_load_glyph: fail for glyph index %u:\n",
                       glyph_index ));
           FT_TRACE0(( "                the end position of glyph data\n" ));
           FT_TRACE0(( "                is set before the start position\n" ));
    @@ -252,8 +252,8 @@
           cs_offset = decoder->lenIV >= 0 ? (FT_UInt)decoder->lenIV : 0;
           if ( cs_offset > glyph_length )
           {
    -        FT_TRACE0(( "cid_load_glyph: fail for glyph_index=%d, "
    -                    "offset to the charstring is beyond glyph length\n",
    +        FT_TRACE0(( "cid_load_glyph: fail for glyph_index=%u,"
    +                    " offset to the charstring is beyond glyph length\n",
                         glyph_index ));
             error = FT_THROW( Invalid_Offset );
             goto Exit;
    @@ -452,16 +452,12 @@
         glyph->x_scale = cidsize->metrics.x_scale;
         glyph->y_scale = cidsize->metrics.y_scale;
     
    -    cidglyph->outline.n_points   = 0;
    -    cidglyph->outline.n_contours = 0;
    -
         hinting = FT_BOOL( ( load_flags & FT_LOAD_NO_SCALE   ) == 0 &&
                            ( load_flags & FT_LOAD_NO_HINTING ) == 0 );
         scaled  = FT_BOOL( ( load_flags & FT_LOAD_NO_SCALE   ) == 0 );
     
         glyph->hint      = hinting;
         glyph->scaled    = scaled;
    -    cidglyph->format = FT_GLYPH_FORMAT_OUTLINE;
     
         error = psaux->t1_decoder_funcs->init( &decoder,
                                                cidglyph->face,
    @@ -501,12 +497,8 @@
     
         /* now set the metrics -- this is rather simple, as    */
         /* the left side bearing is the xMin, and the top side */
    -    /* bearing the yMax                                    */
    -    cidglyph->outline.flags &= FT_OUTLINE_OWNER;
    -    cidglyph->outline.flags |= FT_OUTLINE_REVERSE_FILL;
    -
    -    /* for composite glyphs, return only left side bearing and */
    -    /* advance width                                           */
    +    /* bearing the yMax; for composite glyphs, return only */
    +    /* left side bearing and advance width                 */
         if ( load_flags & FT_LOAD_NO_RECURSE )
         {
           FT_Slot_Internal  internal = cidglyph->internal;
    @@ -527,6 +519,13 @@
           FT_Glyph_Metrics*  metrics = &cidglyph->metrics;
     
     
    +      cidglyph->format = FT_GLYPH_FORMAT_OUTLINE;
    +
    +      cidglyph->outline.flags &= FT_OUTLINE_OWNER;
    +      cidglyph->outline.flags |= FT_OUTLINE_REVERSE_FILL;
    +      if ( cidsize->metrics.y_ppem < 24 )
    +        cidglyph->outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    +
           /* copy the _unscaled_ advance width */
           metrics->horiAdvance =
             FIXED_TO_INT( decoder.builder.advance.x );
    @@ -539,11 +538,6 @@
                                           face->cid.font_bbox.yMin ) >> 16;
           cidglyph->linearVertAdvance = metrics->vertAdvance;
     
    -      cidglyph->format            = FT_GLYPH_FORMAT_OUTLINE;
    -
    -      if ( cidsize->metrics.y_ppem < 24 )
    -        cidglyph->outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    -
           /* apply the font matrix, if any */
           if ( font_matrix.xx != 0x10000L || font_matrix.yy != 0x10000L ||
                font_matrix.xy != 0        || font_matrix.yx != 0        )
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidgload.h b/src/java.desktop/share/native/libfreetype/src/cid/cidgload.h
    index 9fdc9db5892..cef96073ded 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidgload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidgload.h
    @@ -4,7 +4,7 @@
      *
      *   OpenType Glyph Loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidload.c b/src/java.desktop/share/native/libfreetype/src/cid/cidload.c
    index 722f5a34ddf..bb1bf13e221 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidload.c
    @@ -4,7 +4,7 @@
      *
      *   CID-keyed Type1 font loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidload.h b/src/java.desktop/share/native/libfreetype/src/cid/cidload.h
    index 7f030b32df7..659dd0e378c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidload.h
    @@ -4,7 +4,7 @@
      *
      *   CID-keyed Type1 font loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.c b/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.c
    index 8d337c41128..634bbf2f135 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.c
    @@ -4,7 +4,7 @@
      *
      *   CID objects manager (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.h b/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.h
    index d371cbe9954..800268efa2f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidobjs.h
    @@ -4,7 +4,7 @@
      *
      *   CID objects manager (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidparse.c b/src/java.desktop/share/native/libfreetype/src/cid/cidparse.c
    index 73a3ade893b..4d1ba335960 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidparse.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidparse.c
    @@ -4,7 +4,7 @@
      *
      *   CID-keyed Type1 parser (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidparse.h b/src/java.desktop/share/native/libfreetype/src/cid/cidparse.h
    index 0f5baddcb92..6ae2e542394 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidparse.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidparse.h
    @@ -4,7 +4,7 @@
      *
      *   CID-keyed Type1 parser (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidriver.c b/src/java.desktop/share/native/libfreetype/src/cid/cidriver.c
    index 4be8a5c00d5..a3a587c57bf 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidriver.c
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidriver.c
    @@ -4,7 +4,7 @@
      *
      *   CID driver interface (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidriver.h b/src/java.desktop/share/native/libfreetype/src/cid/cidriver.h
    index 7ddce431c5b..55d0b8a0d9b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidriver.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidriver.h
    @@ -4,7 +4,7 @@
      *
      *   High-level CID driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/cid/cidtoken.h b/src/java.desktop/share/native/libfreetype/src/cid/cidtoken.h
    index 160897d1447..d40ebfab86d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/cid/cidtoken.h
    +++ b/src/java.desktop/share/native/libfreetype/src/cid/cidtoken.h
    @@ -4,7 +4,7 @@
      *
      *   CID token definitions (specification only).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -47,7 +47,7 @@
       T1_FIELD_STRING( "FullName",           full_name,           0 )
       T1_FIELD_STRING( "FamilyName",         family_name,         0 )
       T1_FIELD_STRING( "Weight",             weight,              0 )
    -  T1_FIELD_NUM   ( "ItalicAngle",        italic_angle,        0 )
    +  T1_FIELD_FIXED ( "ItalicAngle",        italic_angle,        0 )
       T1_FIELD_BOOL  ( "isFixedPitch",       is_fixed_pitch,      0 )
       T1_FIELD_NUM   ( "UnderlinePosition",  underline_position,  0 )
       T1_FIELD_NUM   ( "UnderlineThickness", underline_thickness, 0 )
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.c b/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.c
    index e2f6a8e5adb..b813efde4eb 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.c
    @@ -4,7 +4,7 @@
      *
      *   AFM parser (body).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.h b/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.h
    index b7766372821..add8597717d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/afmparse.h
    @@ -4,7 +4,7 @@
      *
      *   AFM parser (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.c b/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.c
    index 9556e11a586..17bdd23c7d4 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.c
    @@ -4,7 +4,7 @@
      *
      *   PostScript CFF (Type 2) decoding routines (body).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -2141,7 +2141,7 @@
                                           decoder->locals_bias );
     
     
    -            FT_TRACE4(( " callsubr (idx %d, entering level %td)\n",
    +            FT_TRACE4(( " callsubr (idx %u, entering level %td)\n",
                             idx,
                             zone - decoder->zones + 1 ));
     
    @@ -2185,7 +2185,7 @@
                                           decoder->globals_bias );
     
     
    -            FT_TRACE4(( " callgsubr (idx %d, entering level %td)\n",
    +            FT_TRACE4(( " callgsubr (idx %u, entering level %td)\n",
                             idx,
                             zone - decoder->zones + 1 ));
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.h b/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.h
    index 038f7235c3d..e72ec043baa 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/cffdecode.h
    @@ -4,7 +4,7 @@
      *
      *   PostScript CFF (Type 2) decoding routines (specification).
      *
    - * Copyright (C) 2017-2024 by
    + * Copyright (C) 2017-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psauxerr.h b/src/java.desktop/share/native/libfreetype/src/psaux/psauxerr.h
    index 18428c40d5a..0d7fe2b6121 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psauxerr.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psauxerr.h
    @@ -4,7 +4,7 @@
      *
      *   PS auxiliary module error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.c b/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.c
    index 6826f9d8d3e..942804190c5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType auxiliary PostScript module implementation (body).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.h b/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.h
    index 82d7e348af8..4a5ebc1b607 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psauxmod.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType auxiliary PostScript module implementation (specification).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -46,9 +46,6 @@ FT_BEGIN_HEADER
       const CFF_Decoder_FuncsRec  cff_decoder_funcs;
     
     
    -  FT_EXPORT_VAR( const FT_Module_Class )  psaux_driver_class;
    -
    -
       FT_DECLARE_MODULE( psaux_module_class )
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psconv.c b/src/java.desktop/share/native/libfreetype/src/psaux/psconv.c
    index 56c0ecd1d7f..4567d3f3c06 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psconv.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psconv.c
    @@ -4,7 +4,7 @@
      *
      *   Some convenience conversions (body).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psconv.h b/src/java.desktop/share/native/libfreetype/src/psaux/psconv.h
    index 91fcd15a1c9..63735af411f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psconv.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psconv.h
    @@ -4,7 +4,7 @@
      *
      *   Some convenience conversions (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psintrp.c b/src/java.desktop/share/native/libfreetype/src/psaux/psintrp.c
    index 7572e225e37..7e3475e6f58 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psintrp.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psintrp.c
    @@ -618,7 +618,7 @@
         /*       Our copy of it does not change that requirement.         */
         cf2_arrstack_setCount( &subrStack, CF2_MAX_SUBR + 1 );
     
    -    charstring  = (CF2_Buffer)cf2_arrstack_getBuffer( &subrStack );
    +    charstring = (CF2_Buffer)cf2_arrstack_getBuffer( &subrStack );
     
         /* catch errors so far */
         if ( *error )
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.c b/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.c
    index eca465f009e..8159fd6ef15 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.c
    @@ -4,7 +4,7 @@
      *
      *   Auxiliary functions for PostScript fonts (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -460,6 +460,9 @@
           case '%':
             skip_comment( &cur, limit );
             break;
    +
    +      default:
    +        break;
           }
         }
     
    @@ -1145,7 +1148,7 @@
                 FT_ERROR(( "ps_parser_load_field:"
                            " expected a name or string\n" ));
                 FT_ERROR(( "                     "
    -                       " but found token of type %d instead\n",
    +                       " but found token of type %u instead\n",
                            token.type ));
                 error = FT_THROW( Invalid_File_Format );
                 goto Exit;
    @@ -1225,7 +1228,7 @@
                 if ( result < 0 || (FT_UInt)result < max_objects )
                 {
                   FT_ERROR(( "ps_parser_load_field:"
    -                         " expected %d integer%s in the %s subarray\n",
    +                         " expected %u integer%s in the %s subarray\n",
                              max_objects, max_objects > 1 ? "s" : "",
                              i == 0 ? "first"
                                     : ( i == 1 ? "second"
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.h b/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.h
    index 345fc8a7335..277aa1247c5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/psobjs.h
    @@ -4,7 +4,7 @@
      *
      *   Auxiliary functions for PostScript fonts (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.c b/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.c
    index 5681c3bd0fd..66493b68123 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.c
    @@ -4,7 +4,7 @@
      *
      *   Type 1 character map support (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.h b/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.h
    index 445e6a2784f..114bfbb0410 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/t1cmap.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 character map support (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.c b/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.c
    index c74baa8038f..c3fb343d4c9 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.c
    @@ -4,7 +4,7 @@
      *
      *   PostScript Type 1 decoding routines (body).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -1633,7 +1633,7 @@
     
             default:
               FT_ERROR(( "t1_decoder_parse_charstrings:"
    -                     " unhandled opcode %d\n", op ));
    +                     " unhandled opcode %u\n", op ));
               goto Syntax_Error;
             }
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.h b/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.h
    index 16203b8f734..7b913f55dff 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psaux/t1decode.h
    @@ -4,7 +4,7 @@
      *
      *   PostScript Type 1 decoding routines (specification).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.c b/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.c
    index 967767b3485..e053dba17b2 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.c
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.c
    @@ -4,7 +4,7 @@
      *
      *   PostScript hinting algorithm (body).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used
    @@ -35,10 +35,6 @@
     #endif
     
     
    -#define  COMPUTE_INFLEXS  /* compute inflection points to optimize `S' */
    -                          /* and similar glyphs                        */
    -
    -
       /*************************************************************************/
       /*************************************************************************/
       /*****                                                               *****/
    @@ -100,7 +96,7 @@
     
         if ( idx >= table->max_hints )
         {
    -      FT_TRACE0(( "psh_hint_table_record: invalid hint index %d\n", idx ));
    +      FT_TRACE0(( "psh_hint_table_record: invalid hint index %u\n", idx ));
           return;
         }
     
    @@ -920,117 +916,6 @@
     #define  psh_corner_orientation  ft_corner_orientation
     
     
    -#ifdef COMPUTE_INFLEXS
    -
    -  /* compute all inflex points in a given glyph */
    -  static void
    -  psh_glyph_compute_inflections( PSH_Glyph  glyph )
    -  {
    -    FT_UInt  n;
    -
    -
    -    for ( n = 0; n < glyph->num_contours; n++ )
    -    {
    -      PSH_Point  first, start, end, before, after;
    -      FT_Pos     in_x, in_y, out_x, out_y;
    -      FT_Int     orient_prev, orient_cur;
    -      FT_Int     finished = 0;
    -
    -
    -      /* we need at least 4 points to create an inflection point */
    -      if ( glyph->contours[n].count < 4 )
    -        continue;
    -
    -      /* compute first segment in contour */
    -      first = glyph->contours[n].start;
    -
    -      start = end = first;
    -      do
    -      {
    -        end = end->next;
    -        if ( end == first )
    -          goto Skip;
    -
    -        in_x = end->org_u - start->org_u;
    -        in_y = end->org_v - start->org_v;
    -
    -      } while ( in_x == 0 && in_y == 0 );
    -
    -      /* extend the segment start whenever possible */
    -      before = start;
    -      do
    -      {
    -        do
    -        {
    -          start  = before;
    -          before = before->prev;
    -          if ( before == first )
    -            goto Skip;
    -
    -          out_x = start->org_u - before->org_u;
    -          out_y = start->org_v - before->org_v;
    -
    -        } while ( out_x == 0 && out_y == 0 );
    -
    -        orient_prev = psh_corner_orientation( in_x, in_y, out_x, out_y );
    -
    -      } while ( orient_prev == 0 );
    -
    -      first = start;
    -      in_x  = out_x;
    -      in_y  = out_y;
    -
    -      /* now, process all segments in the contour */
    -      do
    -      {
    -        /* first, extend current segment's end whenever possible */
    -        after = end;
    -        do
    -        {
    -          do
    -          {
    -            end   = after;
    -            after = after->next;
    -            if ( after == first )
    -              finished = 1;
    -
    -            out_x = after->org_u - end->org_u;
    -            out_y = after->org_v - end->org_v;
    -
    -          } while ( out_x == 0 && out_y == 0 );
    -
    -          orient_cur = psh_corner_orientation( in_x, in_y, out_x, out_y );
    -
    -        } while ( orient_cur == 0 );
    -
    -        if ( ( orient_cur ^ orient_prev ) < 0 )
    -        {
    -          do
    -          {
    -            psh_point_set_inflex( start );
    -            start = start->next;
    -          }
    -          while ( start != end );
    -
    -          psh_point_set_inflex( start );
    -        }
    -
    -        start       = end;
    -        end         = after;
    -        orient_prev = orient_cur;
    -        in_x        = out_x;
    -        in_y        = out_y;
    -
    -      } while ( !finished );
    -
    -    Skip:
    -      ;
    -    }
    -  }
    -
    -#endif /* COMPUTE_INFLEXS */
    -
    -
       static void
       psh_glyph_done( PSH_Glyph  glyph )
       {
    @@ -1258,11 +1143,6 @@
         glyph->outline = outline;
         glyph->globals = globals;
     
    -#ifdef COMPUTE_INFLEXS
    -    psh_glyph_load_points( glyph, 0 );
    -    psh_glyph_compute_inflections( glyph );
    -#endif /* COMPUTE_INFLEXS */
    -
         /* now deal with hints tables */
         error = psh_hint_table_init( &glyph->hint_tables [0],
                                      &ps_hints->dimension[0].hints,
    @@ -1285,122 +1165,47 @@
       }
     
     
    -  /* compute all extrema in a glyph for a given dimension */
    +  /* compute all extreme and inflection points */
    +  /* in a glyph for a given dimension          */
       static void
       psh_glyph_compute_extrema( PSH_Glyph  glyph )
       {
         FT_UInt  n;
     
     
    -    /* first of all, compute all local extrema */
         for ( n = 0; n < glyph->num_contours; n++ )
         {
    -      PSH_Point  first = glyph->contours[n].start;
    -      PSH_Point  point, before, after;
    +      PSH_Point  first, point, before, after;
     
     
    -      if ( glyph->contours[n].count == 0 )
    +      /* we need at least 3 points to create an extremum */
    +      if ( glyph->contours[n].count < 3 )
             continue;
     
    -      point  = first;
    -      before = point;
    +      first = glyph->contours[n].start;
    +      point = first->prev;
    +      after = first;
     
           do
           {
    -        before = before->prev;
    -        if ( before == first )
    -          goto Skip;
    -
    -      } while ( before->org_u == point->org_u );
    -
    -      first = point = before->next;
    -
    -      for (;;)
    -      {
    -        after = point;
    -        do
    -        {
    -          after = after->next;
    -          if ( after == first )
    -            goto Next;
    -
    -        } while ( after->org_u == point->org_u );
    -
    -        if ( before->org_u < point->org_u )
    -        {
    -          if ( after->org_u < point->org_u )
    -          {
    -            /* local maximum */
    -            goto Extremum;
    -          }
    -        }
    -        else /* before->org_u > point->org_u */
    -        {
    -          if ( after->org_u > point->org_u )
    -          {
    -            /* local minimum */
    -          Extremum:
    -            do
    -            {
    -              psh_point_set_extremum( point );
    -              point = point->next;
    -
    -            } while ( point != after );
    -          }
    -        }
    -
    -        before = after->prev;
    +        before = point;
             point  = after;
    +        after  = point->next;
     
    -      } /* for  */
    +        if ( ( before->org_u < point->org_u && point->org_u < after->org_u ) ||
    +             ( before->org_u > point->org_u && point->org_u > after->org_u ) )
    +          continue;
     
    -    Next:
    -      ;
    -    }
    +        /* otherwise this is either extremum or inflection point */
    +        psh_point_set_extremum( point );
     
    -    /* for each extremum, determine its direction along the */
    -    /* orthogonal axis                                      */
    -    for ( n = 0; n < glyph->num_points; n++ )
    -    {
    -      PSH_Point  point, before, after;
    +        /* also note its direction */
    +        if ( before->org_v < after->org_v )
    +          psh_point_set_positive( point );
    +        else if ( before->org_v > after->org_v )
    +          psh_point_set_negative( point );
     
    -
    -      point  = &glyph->points[n];
    -      before = point;
    -      after  = point;
    -
    -      if ( psh_point_is_extremum( point ) )
    -      {
    -        do
    -        {
    -          before = before->prev;
    -          if ( before == point )
    -            goto Skip;
    -
    -        } while ( before->org_v == point->org_v );
    -
    -        do
    -        {
    -          after = after->next;
    -          if ( after == point )
    -            goto Skip;
    -
    -        } while ( after->org_v == point->org_v );
    -      }
    -
    -      if ( before->org_v < point->org_v &&
    -           after->org_v  > point->org_v )
    -      {
    -        psh_point_set_positive( point );
    -      }
    -      else if ( before->org_v > point->org_v &&
    -                after->org_v  < point->org_v )
    -      {
    -        psh_point_set_negative( point );
    -      }
    -
    -    Skip:
    -      ;
    +      } while ( after != first );
         }
       }
     
    @@ -1836,8 +1641,7 @@
                  point->dir_in != point->dir_out )
               continue;
     
    -        if ( !psh_point_is_extremum( point ) &&
    -             !psh_point_is_inflex( point )   )
    +        if ( !psh_point_is_extremum( point ) )
               continue;
     
             point->flags &= ~PSH_POINT_SMOOTH;
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.h b/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.h
    index fb362f061b6..f4aa8540559 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.h
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshalgo.h
    @@ -4,7 +4,7 @@
      *
      *   PostScript hinting algorithm (specification).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.c b/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.c
    index 435f45838ff..a772b66f309 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.c
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.c
    @@ -5,7 +5,7 @@
      *   PostScript hinter global hinting management (body).
      *   Inspired by the new auto-hinter module.
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used
    @@ -376,36 +376,24 @@
         /* not.  We simply need to compare the vertical scale     */
         /* parameter to the raw bluescale value.  Here is why:    */
         /*                                                        */
    -    /*   We need to suppress overshoots for all pointsizes.   */
    -    /*   At 300dpi that satisfies:                            */
    +    /* The specs explain how the bluescale is calculated      */
    +    /* from the desired maximum rounded pointsize at 300 dpi  */
    +    /* and assuming upem of 1000.                             */
         /*                                                        */
    -    /*      pointsize < 240*bluescale + 0.49                  */
    +    /*    bluescale = ( pointsize - 0.49 ) / 240              */
         /*                                                        */
    -    /*   This corresponds to:                                 */
    +    /* For unrounded pointsize in general terms               */
         /*                                                        */
    -    /*      pixelsize < 1000*bluescale + 49/24                */
    +    /*    bluescale = ( pointsize * dpi / 72 ) / upem         */
         /*                                                        */
    -    /*      scale*EM_Size < 1000*bluescale + 49/24            */
    +    /* which is                                               */
         /*                                                        */
    -    /*   However, for normal Type 1 fonts, EM_Size is 1000!   */
    -    /*   We thus only check:                                  */
    +    /*    bluescale = pixelsize / upem                        */
         /*                                                        */
    -    /*      scale < bluescale + 49/24000                      */
    +    /* Therefore, the bluescale value can be used directly    */
    +    /* as a scale limit, now that it is in comparable units   */
         /*                                                        */
    -    /*   which we shorten to                                  */
    -    /*                                                        */
    -    /*      "scale < bluescale"                               */
    -    /*                                                        */
    -    /* Note that `blue_scale' is stored 1000 times its real   */
    -    /* value, and that `scale' converts from font units to    */
    -    /* fractional pixels.                                     */
    -    /*                                                        */
    -
    -    /* 1000 / 64 = 125 / 8 */
    -    if ( scale >= 0x20C49BAL )
    -      blues->no_overshoots = FT_BOOL( scale < blues->blue_scale * 8 / 125 );
    -    else
    -      blues->no_overshoots = FT_BOOL( scale * 125 < blues->blue_scale * 8 );
    +    blues->no_overshoots = FT_BOOL( scale < blues->blue_scale );
     
         /*                                                        */
         /*  The blue threshold is the font units distance under   */
    @@ -420,8 +408,8 @@
           FT_Int  threshold = blues->blue_shift;
     
     
    -      while ( threshold > 0 && FT_MulFix( threshold, scale ) > 32 )
    -        threshold--;
    +      if ( threshold > 0 && FT_MulFix( threshold, scale ) > 32 )
    +        threshold = 32 * 0x10000L / scale;
     
           blues->blue_threshold = threshold;
         }
    @@ -708,7 +696,6 @@
     
           /* limit the BlueScale value to `1 / max_of_blue_zone_heights' */
           {
    -        FT_Fixed  max_scale;
             FT_Short  max_height = 1;
     
     
    @@ -725,11 +712,12 @@
                                               priv->family_other_blues,
                                               max_height );
     
    -        /* BlueScale is scaled 1000 times */
    -        max_scale = FT_DivFix( 1000, max_height );
    -        globals->blues.blue_scale = priv->blue_scale < max_scale
    -                                      ? priv->blue_scale
    -                                      : max_scale;
    +        /* restrict BlueScale value that is amplified 1000-fold and */
    +        /* rescale it to be comparable to the metrics scale         */
    +        if ( FT_MulFix( max_height, priv->blue_scale ) < 1000 )
    +          globals->blues.blue_scale = priv->blue_scale * 8 / 125;
    +        else
    +          globals->blues.blue_scale = 64 * 0x10000L / max_height;
           }
     
           globals->blues.blue_shift = priv->blue_shift;
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.h b/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.h
    index c5a5c913168..555e99facb2 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.h
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshglob.h
    @@ -4,7 +4,7 @@
      *
      *   PostScript hinter global hinting management.
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.c b/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.c
    index 9965d5b16bf..c9f4a94fe98 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.c
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType PostScript hinter module implementation (body).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.h b/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.h
    index 62ac0a60fdc..de9c398e9fb 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.h
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshmod.h
    @@ -4,7 +4,7 @@
      *
      *   PostScript hinter module interface (specification).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshnterr.h b/src/java.desktop/share/native/libfreetype/src/pshinter/pshnterr.h
    index e9641340e53..7076664ddde 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshnterr.h
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshnterr.h
    @@ -4,7 +4,7 @@
      *
      *   PS Hinter error codes (specification only).
      *
    - * Copyright (C) 2003-2024 by
    + * Copyright (C) 2003-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.c b/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.c
    index 0b2b549fc29..13754313fbb 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.c
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.c
    @@ -4,7 +4,7 @@
      *
      *   FreeType PostScript hints recorder (body).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -467,7 +467,7 @@
           table->num_masks--;
         }
         else
    -      FT_TRACE0(( "ps_mask_table_merge: ignoring invalid indices (%d,%d)\n",
    +      FT_TRACE0(( "ps_mask_table_merge: ignoring invalid indices (%u,%u)\n",
                       index1, index2 ));
     
       Exit:
    @@ -817,7 +817,7 @@
         /* limit "dimension" to 0..1 */
         if ( dimension > 1 )
         {
    -      FT_TRACE0(( "ps_hints_stem: invalid dimension (%d) used\n",
    +      FT_TRACE0(( "ps_hints_stem: invalid dimension (%u) used\n",
                       dimension ));
           dimension = ( dimension != 0 );
         }
    @@ -870,7 +870,7 @@
           /* limit "dimension" to 0..1 */
           if ( dimension > 1 )
           {
    -        FT_TRACE0(( "ps_hints_t1stem3: invalid dimension (%d) used\n",
    +        FT_TRACE0(( "ps_hints_t1stem3: invalid dimension (%u) used\n",
                         dimension ));
             dimension = ( dimension != 0 );
           }
    @@ -976,7 +976,7 @@
           if ( bit_count !=  count1 + count2 )
           {
             FT_TRACE0(( "ps_hints_t2mask:"
    -                    " called with invalid bitcount %d (instead of %d)\n",
    +                    " called with invalid bitcount %u (instead of %u)\n",
                        bit_count, count1 + count2 ));
     
             /* simply ignore the operator */
    @@ -1022,7 +1022,7 @@
           if ( bit_count !=  count1 + count2 )
           {
             FT_TRACE0(( "ps_hints_t2counter:"
    -                    " called with invalid bitcount %d (instead of %d)\n",
    +                    " called with invalid bitcount %u (instead of %u)\n",
                        bit_count, count1 + count2 ));
     
             /* simply ignore the operator */
    diff --git a/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.h b/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.h
    index 7e375af7ba8..a79069f98d2 100644
    --- a/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.h
    +++ b/src/java.desktop/share/native/libfreetype/src/pshinter/pshrec.h
    @@ -4,7 +4,7 @@
      *
      *   Postscript (Type1/Type2) hints recorder (specification).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.c b/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.c
    index 35d054d1cfb..c5d71edad88 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.c
    +++ b/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.c
    @@ -4,7 +4,7 @@
      *
      *   psnames module implementation (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.h b/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.h
    index 770458316b1..482fd0a36d1 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psnames/psmodule.h
    @@ -4,7 +4,7 @@
      *
      *   High-level psnames module interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psnames/psnamerr.h b/src/java.desktop/share/native/libfreetype/src/psnames/psnamerr.h
    index e123eb65e39..17987f9cd4f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psnames/psnamerr.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psnames/psnamerr.h
    @@ -4,7 +4,7 @@
      *
      *   PS names module error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/psnames/pstables.h b/src/java.desktop/share/native/libfreetype/src/psnames/pstables.h
    index 2a941b04609..65ce6c0b47f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/psnames/pstables.h
    +++ b/src/java.desktop/share/native/libfreetype/src/psnames/pstables.h
    @@ -4,7 +4,7 @@
      *
      *   PostScript glyph names.
      *
    - * Copyright (C) 2005-2024 by
    + * Copyright (C) 2005-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/raster/ftmisc.h b/src/java.desktop/share/native/libfreetype/src/raster/ftmisc.h
    index 943f2aa0a50..9d97223e94e 100644
    --- a/src/java.desktop/share/native/libfreetype/src/raster/ftmisc.h
    +++ b/src/java.desktop/share/native/libfreetype/src/raster/ftmisc.h
    @@ -5,7 +5,7 @@
      *   Miscellaneous macros for stand-alone rasterizer (specification
      *   only).
      *
    - * Copyright (C) 2005-2024 by
    + * Copyright (C) 2005-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used
    diff --git a/src/java.desktop/share/native/libfreetype/src/raster/ftraster.c b/src/java.desktop/share/native/libfreetype/src/raster/ftraster.c
    index e4b7b937d5a..807d444e7aa 100644
    --- a/src/java.desktop/share/native/libfreetype/src/raster/ftraster.c
    +++ b/src/java.desktop/share/native/libfreetype/src/raster/ftraster.c
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph rasterizer (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -251,7 +251,11 @@
       /* On the other hand, SMulDiv means `Slow MulDiv', and is used typically */
       /* for clipping computations.  It simply uses the FT_MulDiv() function   */
       /* defined in `ftcalc.h'.                                                */
    -#define SMulDiv_No_Round  FT_MulDiv_No_Round
    +#ifdef FT_INT64
    +#define SMulDiv( a, b, c )  (Long)( (FT_Int64)(a) * (b) / (c) )
    +#else
    +#define SMulDiv  FT_MulDiv_No_Round
    +#endif
     
       /* The rasterizer is a very general purpose component; please leave */
       /* the following redefinitions there (you never know your target    */
    @@ -653,7 +657,7 @@
           ras.cProfile->height = 0;
         }
     
    -    ras.cProfile->flags  = ras.dropOutControl;
    +    ras.cProfile->flags = ras.dropOutControl;
     
         switch ( aState )
         {
    @@ -967,14 +971,14 @@
           goto Fin;
         }
     
    -    Ix     = SMulDiv_No_Round( e - y1, Dx, Dy );
    +    Ix     = SMulDiv( e - y1, Dx, Dy );
         x1    += Ix;
         *top++ = x1;
     
         if ( --size )
         {
           Ax = Dx * ( e - y1 )    - Dy * Ix;  /* remainder */
    -      Ix = FMulDiv( ras.precision, Dx, Dy );
    +      Ix = SMulDiv( ras.precision, Dx, Dy );
           Rx = Dx * ras.precision - Dy * Ix;  /* remainder */
           Dx = 1;
     
    @@ -1090,8 +1094,8 @@
         PLong  top;
     
     
    -    y1  = arc[degree].y;
    -    y2  = arc[0].y;
    +    y1 = arc[degree].y;
    +    y2 = arc[0].y;
     
         if ( y2 < miny || y1 > maxy )
           return SUCCESS;
    diff --git a/src/java.desktop/share/native/libfreetype/src/raster/ftraster.h b/src/java.desktop/share/native/libfreetype/src/raster/ftraster.h
    index ad9cb1b9fe0..64499bf955b 100644
    --- a/src/java.desktop/share/native/libfreetype/src/raster/ftraster.h
    +++ b/src/java.desktop/share/native/libfreetype/src/raster/ftraster.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph rasterizer (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used
    diff --git a/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.c b/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.c
    index fd9f174f2e1..3fa008704e5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.c
    +++ b/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.c
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph rasterizer interface (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.h b/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.h
    index cf3e73c0a24..d838a942b04 100644
    --- a/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.h
    +++ b/src/java.desktop/share/native/libfreetype/src/raster/ftrend1.h
    @@ -4,7 +4,7 @@
      *
      *   The FreeType glyph rasterizer interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/raster/rasterrs.h b/src/java.desktop/share/native/libfreetype/src/raster/rasterrs.h
    index 326d42e0438..39d82a8051a 100644
    --- a/src/java.desktop/share/native/libfreetype/src/raster/rasterrs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/raster/rasterrs.h
    @@ -4,7 +4,7 @@
      *
      *   monochrome renderer error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.c b/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.c
    index 76181568af9..24fb3455598 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.c
    @@ -4,7 +4,7 @@
      *
      *   PNG Bitmap glyph support.
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * Google, Inc.
      * Written by Stuart Gill and Behdad Esfahbod.
      *
    @@ -420,10 +420,7 @@
         if ( populate_map_and_metrics )
         {
           /* this doesn't overflow: 0x7FFF * 0x7FFF * 4 < 2^32 */
    -      FT_ULong  size = map->rows * (FT_ULong)map->pitch;
    -
    -
    -      error = ft_glyphslot_alloc_bitmap( slot, size );
    +      error = ft_glyphslot_alloc_bitmap( slot );
           if ( error )
             goto DestroyExit;
         }
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.h b/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.h
    index 6e7a5c08e71..c59199e60df 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/pngshim.h
    @@ -4,7 +4,7 @@
      *
      *   PNG Bitmap glyph support.
      *
    - * Copyright (C) 2013-2024 by
    + * Copyright (C) 2013-2025 by
      * Google, Inc.
      * Written by Stuart Gill and Behdad Esfahbod.
      *
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.c b/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.c
    index 81072207b49..32291e23e36 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.c
    @@ -4,7 +4,7 @@
      *
      *   High-level SFNT driver interface (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -895,7 +895,7 @@
             FT_TRACE0(( "sfnt_get_var_ps_name:"
                         " Shortening variation PS name prefix\n" ));
             FT_TRACE0(( "                     "
    -                    " to %d characters\n", len ));
    +                    " to %u characters\n", len ));
           }
     
           face->var_postscript_prefix     = result;
    @@ -1142,12 +1142,7 @@
         FT_Error         error;
     
     
    -    /* XXX: I don't know whether this is correct, since
    -     *      tt_face_find_bdf_prop only returns something correct if we have
    -     *      previously selected a size that is listed in the BDF table.
    -     *      Should we change the BDF table format to include single offsets
    -     *      for `CHARSET_REGISTRY' and `CHARSET_ENCODING'?
    -     */
    +    /* We expect that a bitmap strike has been selected. */
         error = tt_face_find_bdf_prop( face, "CHARSET_REGISTRY", ®istry );
         if ( !error )
         {
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.h b/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.h
    index 6f71489fdc1..be4e33166c1 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfdriver.h
    @@ -4,7 +4,7 @@
      *
      *   High-level SFNT driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sferrors.h b/src/java.desktop/share/native/libfreetype/src/sfnt/sferrors.h
    index d3ca1d9aa8b..2da4ac776b0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sferrors.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sferrors.h
    @@ -4,7 +4,7 @@
      *
      *   SFNT error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.c b/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.c
    index 6ee4e5e939b..6af35787e85 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.c
    @@ -4,7 +4,7 @@
      *
      *   SFNT object management (base).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -579,6 +579,9 @@
         if ( face_instance_index < 0 && face_index > 0 )
           face_index--;
     
    +    /* Note that `face_index` is also used to enumerate elements */
    +    /* of containers like a Mac Resource; this means we must     */
    +    /* check whether we actually have a TTC.                     */
         if ( face_index >= face->ttc_header.count )
         {
           if ( face_instance_index >= 0 )
    @@ -1127,9 +1130,9 @@
             flags |= FT_FACE_FLAG_VERTICAL;
     
           /* kerning available ? */
    -      if ( TT_FACE_HAS_KERNING( face )
    +      if ( face->kern_avail_bits
     #ifdef TT_CONFIG_OPTION_GPOS_KERNING
    -           || face->gpos_kerning_available
    +           || face->num_gpos_lookups_kerning
     #endif
              )
             flags |= FT_FACE_FLAG_KERNING;
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.h b/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.h
    index 90847d95732..8c38b727950 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfobjs.h
    @@ -4,7 +4,7 @@
      *
      *   SFNT object management (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.c b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.c
    index 14514bf9574..015c7b78b4d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.c
    @@ -4,7 +4,7 @@
      *
      *   WOFF format management (base).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.h b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.h
    index a04735ffe28..df7ace5c209 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff.h
    @@ -4,7 +4,7 @@
      *
      *   WOFFF format management (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.c b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.c
    index 589b3e0c6b7..41c233597b8 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.c
    @@ -4,7 +4,7 @@
      *
      *   WOFF2 format management (base).
      *
    - * Copyright (C) 2019-2024 by
    + * Copyright (C) 2019-2025 by
      * Nikhil Ramakrishnan, David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -902,7 +902,7 @@
           substreams[i].offset = pos + offset;
           substreams[i].size   = substream_size;
     
    -      FT_TRACE5(( "  Substream %d: offset = %lu; size = %lu;\n",
    +      FT_TRACE5(( "  Substream %u: offset = %lu; size = %lu;\n",
                       i, substreams[i].offset, substreams[i].size ));
           offset += substream_size;
         }
    @@ -1043,7 +1043,6 @@
             FT_ULong   total_n_points = 0;
             FT_UShort  n_points_contour;
             FT_UInt    j;
    -        FT_ULong   flag_size;
             FT_ULong   triplet_size;
             FT_ULong   triplet_bytes_used;
             FT_Bool    have_overlap  = FALSE;
    @@ -1088,8 +1087,8 @@
             }
             substreams[N_POINTS_STREAM].offset = FT_STREAM_POS();
     
    -        flag_size = total_n_points;
    -        if ( flag_size > substreams[FLAG_STREAM].size )
    +        points_size += total_n_points;
    +        if ( points_size > substreams[FLAG_STREAM].size )
               goto Fail;
     
             flags_buf   = stream->base + substreams[FLAG_STREAM].offset;
    @@ -1106,8 +1105,7 @@
             triplet_bytes_used = 0;
     
             /* Create array to store point information. */
    -        points_size = total_n_points;
    -        if ( FT_QNEW_ARRAY( points, points_size ) )
    +        if ( FT_QNEW_ARRAY( points, total_n_points ) )
               goto Fail;
     
             if ( triplet_decode( flags_buf,
    @@ -1118,7 +1116,7 @@
                                  &triplet_bytes_used ) )
               goto Fail;
     
    -        substreams[FLAG_STREAM].offset  += flag_size;
    +        substreams[FLAG_STREAM].offset  += total_n_points;
             substreams[GLYPH_STREAM].offset += triplet_bytes_used;
     
             if ( FT_STREAM_SEEK( substreams[GLYPH_STREAM].offset ) ||
    @@ -1592,7 +1590,7 @@
           WOFF2_TableRec  table = *( indices[nn] );
     
     
    -      FT_TRACE3(( "Seeking to %ld with table size %ld.\n",
    +      FT_TRACE3(( "Seeking to %lu with table size %lu.\n",
                       table.src_offset, table.src_length ));
           FT_TRACE3(( "Table tag: %c%c%c%c.\n",
                       (FT_Char)( table.Tag >> 24 ),
    @@ -1943,7 +1941,7 @@
           src_offset       += table->TransformLength;
           table->dst_offset = 0;
     
    -      FT_TRACE2(( "  %c%c%c%c  %08d  %08d   %08ld    %08ld    %08ld\n",
    +      FT_TRACE2(( "  %c%c%c%c  %08d  %08d   %08lu    %08lu    %08lu\n",
                       (FT_Char)( table->Tag >> 24 ),
                       (FT_Char)( table->Tag >> 16 ),
                       (FT_Char)( table->Tag >> 8  ),
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.h b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.h
    index f41140648dc..588761d0c8e 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/sfwoff2.h
    @@ -4,7 +4,7 @@
      *
      *   WOFFF2 format management (specification).
      *
    - * Copyright (C) 2019-2024 by
    + * Copyright (C) 2019-2025 by
      * Nikhil Ramakrishnan, David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.c
    index 28f4d1173c0..91b02344224 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType character mapping table (cmap) support (body).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -179,7 +179,7 @@
     
     
         cmap_info->format   = 0;
    -    cmap_info->language = (FT_ULong)TT_PEEK_USHORT( p );
    +    cmap_info->language = TT_PEEK_USHORT( p );
     
         return FT_Err_Ok;
       }
    @@ -596,7 +596,7 @@
     
     
         cmap_info->format   = 2;
    -    cmap_info->language = (FT_ULong)TT_PEEK_USHORT( p );
    +    cmap_info->language = TT_PEEK_USHORT( p );
     
         return FT_Err_Ok;
       }
    @@ -1539,7 +1539,7 @@
     
     
         cmap_info->format   = 4;
    -    cmap_info->language = (FT_ULong)TT_PEEK_USHORT( p );
    +    cmap_info->language = TT_PEEK_USHORT( p );
     
         return FT_Err_Ok;
       }
    @@ -1712,7 +1712,7 @@
     
     
         cmap_info->format   = 6;
    -    cmap_info->language = (FT_ULong)TT_PEEK_USHORT( p );
    +    cmap_info->language = TT_PEEK_USHORT( p );
     
         return FT_Err_Ok;
       }
    @@ -2009,7 +2009,7 @@
     
     
         cmap_info->format   = 8;
    -    cmap_info->language = (FT_ULong)TT_PEEK_ULONG( p );
    +    cmap_info->language = TT_PEEK_ULONG( p );
     
         return FT_Err_Ok;
       }
    @@ -2184,7 +2184,7 @@
     
     
         cmap_info->format   = 10;
    -    cmap_info->language = (FT_ULong)TT_PEEK_ULONG( p );
    +    cmap_info->language = TT_PEEK_ULONG( p );
     
         return FT_Err_Ok;
       }
    @@ -2528,7 +2528,7 @@
     
     
         cmap_info->format   = 12;
    -    cmap_info->language = (FT_ULong)TT_PEEK_ULONG( p );
    +    cmap_info->language = TT_PEEK_ULONG( p );
     
         return FT_Err_Ok;
       }
    @@ -2844,7 +2844,7 @@
     
     
         cmap_info->format   = 13;
    -    cmap_info->language = (FT_ULong)TT_PEEK_ULONG( p );
    +    cmap_info->language = TT_PEEK_ULONG( p );
     
         return FT_Err_Ok;
       }
    @@ -3792,7 +3792,7 @@
           return FT_THROW( Invalid_Table );
     
         /* Version 1.8.3 of the OpenType specification contains the following */
    -    /* (https://docs.microsoft.com/en-us/typography/opentype/spec/cmap):  */
    +    /* (https://learn.microsoft.com/typography/opentype/spec/cmap):       */
         /*                                                                    */
         /*   The 'cmap' table version number remains at 0x0000 for fonts that */
         /*   make use of the newer subtable formats.                          */
    @@ -3803,7 +3803,7 @@
         p += 2;
     
         num_cmaps = TT_NEXT_USHORT( p );
    -    FT_TRACE4(( "tt_face_build_cmaps: %d cmaps\n", num_cmaps ));
    +    FT_TRACE4(( "tt_face_build_cmaps: %u cmaps\n", num_cmaps ));
     
         limit = table + face->cmap_size;
         for ( ; num_cmaps > 0 && p + 8 <= limit; num_cmaps-- )
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.h
    index e2c5e72bf02..645e9e37e0c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmap.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType character mapping table (cmap) support (specification).
      *
    - * Copyright (C) 2002-2024 by
    + * Copyright (C) 2002-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmapc.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmapc.h
    index 370898363f3..65807bb7378 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmapc.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcmapc.h
    @@ -4,7 +4,7 @@
      *
      *   TT CMAP classes definitions (specification only).
      *
    - * Copyright (C) 2009-2024 by
    + * Copyright (C) 2009-2025 by
      * Oran Agra and Mickey Gabel.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.c
    index b37658dde9e..7929b7aaf4c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType and OpenType colored glyph layer support (body).
      *
    - * Copyright (C) 2018-2024 by
    + * Copyright (C) 2018-2025 by
      * David Turner, Robert Wilhelm, Dominik Röttsches, and Werner Lemberg.
      *
      * Originally written by Shao Yu Zhang .
    @@ -51,7 +51,7 @@
     #define COLOR_STOP_SIZE                   6U
     #define VAR_IDX_BASE_SIZE                 4U
     #define LAYER_SIZE                        4U
    -/* https://docs.microsoft.com/en-us/typography/opentype/spec/colr#colr-header */
    +/* https://learn.microsoft.com/typography/opentype/spec/colr#colr-header */
     /* 3 * uint16 + 2 * Offset32 */
     #define COLRV0_HEADER_SIZE               14U
     /* COLRV0_HEADER_SIZE + 5 * Offset32 */
    @@ -1749,7 +1749,6 @@
         FT_UInt  x, y;
         FT_Byte  b, g, r, alpha;
     
    -    FT_ULong  size;
         FT_Byte*  src;
         FT_Byte*  dst;
     
    @@ -1767,13 +1766,9 @@
           dstSlot->bitmap.pitch      = (int)dstSlot->bitmap.width * 4;
           dstSlot->bitmap.num_grays  = 256;
     
    -      size = dstSlot->bitmap.rows * (unsigned int)dstSlot->bitmap.pitch;
    -
    -      error = ft_glyphslot_alloc_bitmap( dstSlot, size );
    +      error = ft_glyphslot_alloc_bitmap( dstSlot );
           if ( error )
             return error;
    -
    -      FT_MEM_ZERO( dstSlot->bitmap.buffer, size );
         }
         else
         {
    @@ -1805,8 +1800,7 @@
             FT_Byte*  q;
     
     
    -        size  = rows * pitch;
    -        if ( FT_ALLOC( buf, size ) )
    +        if ( FT_ALLOC_MULT( buf, rows, pitch ) )
               return error;
     
             p = dstSlot->bitmap.buffer;
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.h
    index 30031464c73..3913acc74d5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcolr.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType and OpenType colored glyph layer support (specification).
      *
    - * Copyright (C) 2018-2024 by
    + * Copyright (C) 2018-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * Originally written by Shao Yu Zhang .
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.c
    index 997eb869ffc..6d1208f6af2 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType and OpenType color palette support (body).
      *
    - * Copyright (C) 2018-2024 by
    + * Copyright (C) 2018-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * Originally written by Shao Yu Zhang .
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.h
    index bb301ae88b6..a0b4c9d927f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttcpal.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType and OpenType color palette support (specification).
      *
    - * Copyright (C) 2018-2024 by
    + * Copyright (C) 2018-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * Originally written by Shao Yu Zhang .
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.c
    index f0411366af4..76618b0d3bb 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.c
    @@ -2,10 +2,9 @@
      *
      * ttkern.c
      *
    - *   Load the basic TrueType kerning table.  This doesn't handle
    - *   kerning data within the GPOS table at the moment.
    + *   Routines to parse and access the 'kern' table for kerning (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.h
    index a54e51df12d..e0075dce61d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttkern.h
    @@ -2,10 +2,10 @@
      *
      * ttkern.h
      *
    - *   Load the basic TrueType kerning table.  This doesn't handle
    - *   kerning data within the GPOS table at the moment.
    + *   Routines to parse and access the 'kern' table for kerning
    + *   (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -40,8 +40,6 @@ FT_BEGIN_HEADER
                            FT_UInt     left_glyph,
                            FT_UInt     right_glyph );
     
    -#define TT_FACE_HAS_KERNING( face )  ( (face)->kern_avail_bits != 0 )
    -
     
     FT_END_HEADER
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.c
    index c3a5fae2cb9..0c257ce4d31 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.c
    @@ -5,7 +5,7 @@
      *   Load the basic TrueType tables, i.e., tables that can be either in
      *   TTF or OTF fonts (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -535,7 +535,8 @@
        *     The tag of table to load.  Use the value 0 if you want
        *     to access the whole font file, else set this parameter
        *     to a valid TrueType table tag that you can forge with
    -   *     the MAKE_TT_TAG macro.
    +   *     the MAKE_TT_TAG macro.  Use value 1 to access the table
    +   *     directory.
        *
        *   offset ::
        *     The starting offset in the table (or the file if
    @@ -577,7 +578,29 @@
         FT_ULong   size;
     
     
    -    if ( tag != 0 )
    +    if ( tag == 0 )
    +    {
    +      /* The whole font file. */
    +      size = face->root.stream->size;
    +    }
    +    else if ( tag == 1 )
    +    {
    +      /* The currently selected font's table directory.            */
    +      /*                                                           */
    +      /* Note that `face_index` is also used to enumerate elements */
    +      /* of containers like a Mac Resource; this means we must     */
    +      /* check whether we actually have a TTC (with multiple table */
    +      /* directories).                                             */
    +      FT_Long  idx = face->root.face_index & 0xFFFF;
    +
    +
    +      if ( idx >= face->ttc_header.count )
    +        idx = 0;
    +
    +      offset += face->ttc_header.offsets[idx];
    +      size    = 4 + 8 + 16 * face->num_tables;
    +    }
    +    else
         {
           /* look for tag in font directory */
           table = tt_face_lookup_table( face, tag );
    @@ -590,9 +613,6 @@
           offset += table->Offset;
           size    = table->Length;
         }
    -    else
    -      /* tag == 0 -- the user wants to access the font file directly */
    -      size = face->root.stream->size;
     
         if ( length && *length == 0 )
         {
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.h
    index 2b1d62d9bd9..e3666c901b1 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttload.h
    @@ -5,7 +5,7 @@
      *   Load the basic TrueType tables, i.e., tables that can be either in
      *   TTF or OTF fonts (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.c
    index 27884118563..541d8447470 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.c
    @@ -4,7 +4,7 @@
      *
      *   Load the metrics tables common to TTF and OTF fonts (body).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -306,7 +306,7 @@
         }
     
     #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
    -    if ( var && face->blend )
    +    if ( var && FT_IS_VARIATION( &face->root ) )
         {
           FT_Face  f = FT_FACE( face );
           FT_Int   a = (FT_Int)*aadvance;
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.h
    index 34b3c0e18f2..1ee84507f15 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttmtx.h
    @@ -4,7 +4,7 @@
      *
      *   Load the metrics tables common to TTF and OTF fonts (specification).
      *
    - * Copyright (C) 2006-2024 by
    + * Copyright (C) 2006-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.c
    index 5698a62c8d1..4246b6c8eff 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.c
    @@ -5,7 +5,7 @@
      *   PostScript name table processing for TrueType and OpenType fonts
      *   (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.h
    index 150db6c3981..a11b6696854 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttpost.h
    @@ -5,7 +5,7 @@
      *   PostScript name table processing for TrueType and OpenType fonts
      *   (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.c b/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.c
    index cb3a8abf182..34e45619817 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType and OpenType embedded bitmap support (body).
      *
    - * Copyright (C) 2005-2024 by
    + * Copyright (C) 2005-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * Copyright 2013 by Google, Inc.
    @@ -342,7 +342,7 @@
                 FT_TRACE2(( "tt_face_load_strike_metrics:"
                             " sanitizing invalid ascender and descender\n" ));
                 FT_TRACE2(( "                            "
    -                        " values for strike %ld (%dppem, %dppem)\n",
    +                        " values for strike %lu (%dppem, %dppem)\n",
                             strike_index,
                             metrics->x_ppem, metrics->y_ppem ));
     
    @@ -547,7 +547,6 @@
         FT_Error    error = FT_Err_Ok;
         FT_UInt     width, height;
         FT_Bitmap*  map = decoder->bitmap;
    -    FT_ULong    size;
     
     
         if ( !decoder->metrics_loaded )
    @@ -599,17 +598,11 @@
           goto Exit;
         }
     
    -    size = map->rows * (FT_ULong)map->pitch;
    -
    -    /* check that there is no empty image */
    -    if ( size == 0 )
    -      goto Exit;     /* exit successfully! */
    -
         if ( metrics_only )
           goto Exit;     /* only metrics are requested */
     
    -    error = ft_glyphslot_alloc_bitmap( decoder->face->root.glyph, size );
    -    if ( error )
    +    error = ft_glyphslot_alloc_bitmap( decoder->face->root.glyph );
    +    if ( error || !map->buffer )
           goto Exit;
     
         decoder->bitmap_allocated = 1;
    @@ -993,7 +986,7 @@
           goto Fail;
         }
     
    -    FT_TRACE3(( "tt_sbit_decoder_load_compound: loading %d component%s\n",
    +    FT_TRACE3(( "tt_sbit_decoder_load_compound: loading %u component%s\n",
                     num_components,
                     num_components == 1 ? "" : "s" ));
     
    @@ -1419,7 +1412,7 @@
         image_start = image_offset + image_start;
     
         FT_TRACE3(( "tt_sbit_decoder_load_image:"
    -                " found sbit (format %d) for glyph index %d\n",
    +                " found sbit (format %u) for glyph index %u\n",
                     image_format, glyph_index ));
     
         return tt_sbit_decoder_load_bitmap( decoder,
    @@ -1438,13 +1431,13 @@
         if ( recurse_count )
         {
           FT_TRACE4(( "tt_sbit_decoder_load_image:"
    -                  " missing subglyph sbit with glyph index %d\n",
    +                  " missing subglyph sbit with glyph index %u\n",
                       glyph_index ));
           return FT_THROW( Invalid_Composite );
         }
     
         FT_TRACE4(( "tt_sbit_decoder_load_image:"
    -                " no sbit found for glyph index %d\n", glyph_index ));
    +                " no sbit found for glyph index %u\n", glyph_index ));
         return FT_THROW( Missing_Bitmap );
       }
     
    @@ -1462,12 +1455,13 @@
         FT_Int    originOffsetX, originOffsetY;
         FT_Tag    graphicType;
         FT_Int    recurse_depth = 0;
    +    FT_Bool   flipped       = FALSE;
     
         FT_Error  error;
         FT_Byte*  p;
     
    -    FT_UNUSED( map );
     #ifndef FT_CONFIG_OPTION_USE_PNG
    +    FT_UNUSED( map );
         FT_UNUSED( metrics_only );
     #endif
     
    @@ -1517,12 +1511,16 @@
     
         switch ( graphicType )
         {
    +    case FT_MAKE_TAG( 'f', 'l', 'i', 'p' ):
    +      flipped = !flipped;
    +      FALL_THROUGH;
    +
         case FT_MAKE_TAG( 'd', 'u', 'p', 'e' ):
    -      if ( recurse_depth < 4 )
    +      if ( recurse_depth++ < 4 )
           {
             glyph_index = FT_GET_USHORT();
             FT_FRAME_EXIT();
    -        recurse_depth++;
    +
             goto retry;
           }
           error = FT_THROW( Invalid_File_Format );
    @@ -1540,6 +1538,38 @@
                                  glyph_end - glyph_start - 8,
                                  TRUE,
                                  metrics_only );
    +      if ( flipped && !metrics_only && !error )
    +      {
    +        FT_UInt32*  curr_pos = (FT_UInt32*)map->buffer;
    +
    +        /* `Load_SBit_Png` always returns a pixmap with 32 bits per pixel */
    +        /* and no extra pitch bytes.                                      */
    +        FT_UInt  width = map->width;
    +        FT_UInt  y;
    +
    +
    +        for ( y = 0; y < map->rows; y++ )
    +        {
    +          FT_UInt32*  left  = curr_pos;
    +          FT_UInt32*  right = curr_pos + width - 1;
    +
    +
    +          while ( left < right )
    +          {
    +            FT_UInt32  value;
    +
    +
    +            value  = *right;
    +            *right = *left;
    +            *left  = value;
    +
    +            left++;
    +            right--;
    +          }
    +
    +          curr_pos += width;
    +        }
    +      }
     #else
           error = FT_THROW( Unimplemented_Feature );
     #endif
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.h b/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.h
    index 96f80a58424..7427149d68f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/ttsbit.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType and OpenType embedded bitmap support (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.c b/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.c
    index 532ccfa1737..0f9e3889aab 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.c
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.c
    @@ -4,7 +4,7 @@
      *
      *   WOFF2 Font table tags (base).
      *
    - * Copyright (C) 2019-2024 by
    + * Copyright (C) 2019-2025 by
      * Nikhil Ramakrishnan, David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.h b/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.h
    index d03b4b41bc9..e223022962e 100644
    --- a/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.h
    +++ b/src/java.desktop/share/native/libfreetype/src/sfnt/woff2tags.h
    @@ -4,7 +4,7 @@
      *
      *   WOFF2 Font table tags (specification).
      *
    - * Copyright (C) 2019-2024 by
    + * Copyright (C) 2019-2025 by
      * Nikhil Ramakrishnan, David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.c b/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.c
    index b7c0632a6fa..3c387aea0ac 100644
    --- a/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.c
    +++ b/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.c
    @@ -4,7 +4,7 @@
      *
      *   A new `perfect' anti-aliasing renderer (body).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -157,10 +157,6 @@
     
     #define ft_memset   memset
     
    -#define ft_setjmp   setjmp
    -#define ft_longjmp  longjmp
    -#define ft_jmp_buf  jmp_buf
    -
     typedef ptrdiff_t  FT_PtrDist;
     
     
    @@ -170,8 +166,8 @@ typedef ptrdiff_t  FT_PtrDist;
     #define Smooth_Err_Invalid_Argument     -3
     #define Smooth_Err_Raster_Overflow      -4
     
    -#define FT_BEGIN_HEADER
    -#define FT_END_HEADER
    +#define FT_BEGIN_HEADER  /* nothing */
    +#define FT_END_HEADER    /* nothing */
     
     #include "ftimage.h"
     #include "ftgrays.h"
    @@ -495,6 +491,7 @@ typedef ptrdiff_t  FT_PtrDist;
         TCoord  min_ey, max_ey;
         TCoord  count_ey;        /* same as (max_ey - min_ey) */
     
    +    int         error;       /* pool overflow exception                  */
         PCell       cell;        /* current cell                             */
         PCell       cell_free;   /* call allocation next free slot           */
         PCell       cell_null;   /* last cell, used as dumpster and limit    */
    @@ -510,8 +507,6 @@ typedef ptrdiff_t  FT_PtrDist;
         FT_Raster_Span_Func  render_span;
         void*                render_span_data;
     
    -    ft_jmp_buf  jump_buffer;
    -
       } gray_TWorker, *gray_PWorker;
     
     #if defined( _MSC_VER )
    @@ -613,9 +608,14 @@ typedef ptrdiff_t  FT_PtrDist;
           }
     
           /* insert new cell */
    -      cell = ras.cell_free++;
    -      if ( cell >= ras.cell_null )
    -        ft_longjmp( ras.jump_buffer, 1 );
    +      cell = ras.cell_free;
    +      if ( cell == ras.cell_null )
    +      {
    +        ras.error = FT_THROW( Raster_Overflow );
    +        goto Found;
    +      }
    +
    +      ras.cell_free = cell + 1;
     
           cell->x     = ex;
           cell->area  = 0;
    @@ -1353,7 +1353,8 @@ typedef ptrdiff_t  FT_PtrDist;
     
         ras.x = x;
         ras.y = y;
    -    return 0;
    +
    +    return ras.error;
       }
     
     
    @@ -1365,7 +1366,8 @@ typedef ptrdiff_t  FT_PtrDist;
     
     
         gray_render_line( RAS_VAR_ UPSCALE( to->x ), UPSCALE( to->y ) );
    -    return 0;
    +
    +    return ras.error;
       }
     
     
    @@ -1378,7 +1380,8 @@ typedef ptrdiff_t  FT_PtrDist;
     
     
         gray_render_conic( RAS_VAR_ control, to );
    -    return 0;
    +
    +    return ras.error;
       }
     
     
    @@ -1392,7 +1395,8 @@ typedef ptrdiff_t  FT_PtrDist;
     
     
         gray_render_cubic( RAS_VAR_ control1, control2, to );
    -    return 0;
    +
    +    return ras.error;
       }
     
     
    @@ -1700,30 +1704,22 @@ typedef ptrdiff_t  FT_PtrDist;
       gray_convert_glyph_inner( RAS_ARG_
                                 int  continued )
       {
    -    volatile int  error;
    +    int  error;
     
     
    -    if ( ft_setjmp( ras.jump_buffer ) == 0 )
    -    {
    -      if ( continued )
    -        FT_Trace_Disable();
    -      error = FT_Outline_Decompose( &ras.outline, &func_interface, &ras );
    -      if ( continued )
    -        FT_Trace_Enable();
    +    if ( continued )
    +      FT_Trace_Disable();
    +    error = FT_Outline_Decompose( &ras.outline, &func_interface, &ras );
    +    if ( continued )
    +      FT_Trace_Enable();
     
    -      FT_TRACE7(( "band [%d..%d]: %td cell%s remaining\n",
    -                  ras.min_ey,
    -                  ras.max_ey,
    -                  ras.cell_null - ras.cell_free,
    -                  ras.cell_null - ras.cell_free == 1 ? "" : "s" ));
    -    }
    -    else
    -    {
    -      error = FT_THROW( Raster_Overflow );
    -
    -      FT_TRACE7(( "band [%d..%d]: to be bisected\n",
    -                  ras.min_ey, ras.max_ey ));
    -    }
    +    FT_TRACE7(( error == Smooth_Err_Raster_Overflow
    +                  ? "band [%d..%d]: to be bisected\n"
    +                  : "band [%d..%d]: %td cell%s remaining\n",
    +                ras.min_ey,
    +                ras.max_ey,
    +                ras.cell_null - ras.cell_free,
    +                ras.cell_null - ras.cell_free == 1 ? "" : "s" ));
     
         return error;
       }
    @@ -1808,7 +1804,7 @@ typedef ptrdiff_t  FT_PtrDist;
               FT_FILL_RULE( coverage, cover, fill );
     
               span[n].coverage = (unsigned char)coverage;
    -          span[n].x        = (short)x;
    +          span[n].x        = (unsigned short)x;
               span[n].len      = (unsigned short)( cell->x - x );
     
               if ( ++n == FT_MAX_GRAY_SPANS )
    @@ -1827,7 +1823,7 @@ typedef ptrdiff_t  FT_PtrDist;
               FT_FILL_RULE( coverage, area, fill );
     
               span[n].coverage = (unsigned char)coverage;
    -          span[n].x        = (short)cell->x;
    +          span[n].x        = (unsigned short)cell->x;
               span[n].len      = 1;
     
               if ( ++n == FT_MAX_GRAY_SPANS )
    @@ -1873,6 +1869,7 @@ typedef ptrdiff_t  FT_PtrDist;
         TCoord*  band;
     
         int  continued = 0;
    +    int  error     = Smooth_Err_Ok;
     
     
         /* Initialize the null cell at the end of the poll. */
    @@ -1907,7 +1904,6 @@ typedef ptrdiff_t  FT_PtrDist;
           do
           {
             TCoord  i;
    -        int     error;
     
     
             ras.min_ex = band[1];
    @@ -1922,6 +1918,7 @@ typedef ptrdiff_t  FT_PtrDist;
     
             ras.cell_free = buffer + n;
             ras.cell      = ras.cell_null;
    +        ras.error     = Smooth_Err_Ok;
     
             error     = gray_convert_glyph_inner( RAS_VAR_ continued );
             continued = 1;
    @@ -1936,7 +1933,7 @@ typedef ptrdiff_t  FT_PtrDist;
               continue;
             }
             else if ( error != Smooth_Err_Raster_Overflow )
    -          return error;
    +          goto Exit;
     
             /* render pool overflow; we will reduce the render band by half */
             i = ( band[0] - band[1] ) >> 1;
    @@ -1945,7 +1942,8 @@ typedef ptrdiff_t  FT_PtrDist;
             if ( i == 0 )
             {
               FT_TRACE7(( "gray_convert_glyph: rotten glyph\n" ));
    -          return FT_THROW( Raster_Overflow );
    +          error = FT_THROW( Raster_Overflow );
    +          goto Exit;
             }
     
             band++;
    @@ -1954,7 +1952,11 @@ typedef ptrdiff_t  FT_PtrDist;
           } while ( band >= bands );
         }
     
    -    return Smooth_Err_Ok;
    +  Exit:
    +    ras.cell   = ras.cell_free = ras.cell_null = NULL;
    +    ras.ycells = NULL;
    +
    +    return error;
       }
     
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.h b/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.h
    index 940fbe8c79b..e463e5b3eb8 100644
    --- a/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.h
    +++ b/src/java.desktop/share/native/libfreetype/src/smooth/ftgrays.h
    @@ -4,7 +4,7 @@
      *
      *   FreeType smooth renderer declaration
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -19,11 +19,6 @@
     #ifndef FTGRAYS_H_
     #define FTGRAYS_H_
     
    -#ifdef __cplusplus
    -  extern "C" {
    -#endif
    -
    -
     #ifdef STANDALONE_
     #include "ftimage.h"
     #else
    @@ -31,6 +26,7 @@
     #include 
     #endif
     
    +FT_BEGIN_HEADER
     
       /**************************************************************************
        *
    @@ -46,10 +42,7 @@
     
       FT_EXPORT_VAR( const FT_Raster_Funcs )  ft_grays_raster;
     
    -
    -#ifdef __cplusplus
    -  }
    -#endif
    +FT_END_HEADER
     
     #endif /* FTGRAYS_H_ */
     
    diff --git a/src/java.desktop/share/native/libfreetype/src/smooth/ftsmerrs.h b/src/java.desktop/share/native/libfreetype/src/smooth/ftsmerrs.h
    index 6d41fb8e0fd..8d5068549fa 100644
    --- a/src/java.desktop/share/native/libfreetype/src/smooth/ftsmerrs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/smooth/ftsmerrs.h
    @@ -4,7 +4,7 @@
      *
      *   smooth renderer error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.c b/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.c
    index f0acc1ea4a6..5a7a852a619 100644
    --- a/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.c
    +++ b/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.c
    @@ -4,7 +4,7 @@
      *
      *   Anti-aliasing renderer interface (body).
      *
    - * Copyright (C) 2000-2024 by
    + * Copyright (C) 2000-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -80,6 +80,7 @@
       {
         unsigned char*  origin;  /* pixmap origin at the bottom-left */
         int             pitch;   /* pitch to go down one row */
    +    unsigned char   wght[5]; /* filtering weights */
     
       } TOrigin;
     
    @@ -274,6 +275,32 @@
       }
     
     
    +  /* This function applies a horizontal filter in direct rendering mode */
    +  static void
    +  ft_smooth_lcd_spans( int             y,
    +                       int             count,
    +                       const FT_Span*  spans,
    +                       void*           target_ )   /* TOrigin* */
    +  {
    +    TOrigin*  target = (TOrigin*)target_;
    +
    +    unsigned char*  dst_line = target->origin - y * target->pitch - 2;
    +    unsigned char*  dst;
    +    unsigned short  w;
    +
    +
    +    for ( ; count--; spans++ )
    +      for ( dst = dst_line + spans->x, w = spans->len; w--; dst++ )
    +      {
    +        dst[0] += ( spans->coverage * target->wght[0] + 85 ) >> 8;
    +        dst[1] += ( spans->coverage * target->wght[1] + 85 ) >> 8;
    +        dst[2] += ( spans->coverage * target->wght[2] + 85 ) >> 8;
    +        dst[3] += ( spans->coverage * target->wght[3] + 85 ) >> 8;
    +        dst[4] += ( spans->coverage * target->wght[4] + 85 ) >> 8;
    +      }
    +  }
    +
    +
       static FT_Error
       ft_smooth_raster_lcd( FT_Renderer  render,
                             FT_Outline*  outline,
    @@ -285,11 +312,47 @@
         FT_Vector*  vec;
     
         FT_Raster_Params  params;
    +    TOrigin           target;
     
     
    -    params.target = bitmap;
    -    params.source = outline;
    -    params.flags  = FT_RASTER_FLAG_AA;
    +    if ( render->root.library->lcd_weights[2] )
    +    {
    +      /* Reject outlines that are too wide for 16-bit FT_Span.       */
    +      /* Other limits are applied upstream with the same error code. */
    +      if ( bitmap->width > 0x7FFF )
    +        return FT_THROW( Raster_Overflow );
    +
    +      /* Set up direct rendering for instant filtering. */
    +      params.source     = outline;
    +      params.flags      = FT_RASTER_FLAG_AA | FT_RASTER_FLAG_DIRECT;
    +      params.gray_spans = ft_smooth_lcd_spans;
    +      params.user       = ⌖
    +
    +      params.clip_box.xMin = 0;
    +      params.clip_box.yMin = 0;
    +      params.clip_box.xMax = bitmap->width;
    +      params.clip_box.yMax = bitmap->rows;
    +
    +      if ( bitmap->pitch < 0 )
    +        target.origin = bitmap->buffer;
    +      else
    +        target.origin = bitmap->buffer
    +                        + ( bitmap->rows - 1 ) * (unsigned int)bitmap->pitch;
    +
    +      target.pitch = bitmap->pitch;
    +
    +      target.wght[0] = render->root.library->lcd_weights[0];
    +      target.wght[1] = render->root.library->lcd_weights[1];
    +      target.wght[2] = render->root.library->lcd_weights[2];
    +      target.wght[3] = render->root.library->lcd_weights[3];
    +      target.wght[4] = render->root.library->lcd_weights[4];
    +    }
    +    else
    +    {
    +      params.target = bitmap;
    +      params.source = outline;
    +      params.flags  = FT_RASTER_FLAG_AA;
    +    }
     
         /* implode outline */
         for ( vec = points; vec < points_end; vec++ )
    @@ -306,6 +369,32 @@
       }
     
     
    +  /* This function applies a vertical filter in direct rendering mode */
    +  static void
    +  ft_smooth_lcdv_spans( int             y,
    +                        int             count,
    +                        const FT_Span*  spans,
    +                        void*           target_ )   /* TOrigin* */
    +  {
    +    TOrigin*  target = (TOrigin*)target_;
    +
    +    int             pitch    = target->pitch;
    +    unsigned char*  dst_line = target->origin - ( y + 2 ) * pitch;
    +    unsigned char*  dst;
    +    unsigned short  w;
    +
    +
    +    for ( ; count--; spans++ )
    +      for ( dst = dst_line + spans->x, w = spans->len; w--; dst++ )
    +      {
    +        dst[        0] += ( spans->coverage * target->wght[0] + 85 ) >> 8;
    +        dst[    pitch] += ( spans->coverage * target->wght[1] + 85 ) >> 8;
    +        dst[2 * pitch] += ( spans->coverage * target->wght[2] + 85 ) >> 8;
    +        dst[3 * pitch] += ( spans->coverage * target->wght[3] + 85 ) >> 8;
    +        dst[4 * pitch] += ( spans->coverage * target->wght[4] + 85 ) >> 8;
    +      }
    +  }
    +
       static FT_Error
       ft_smooth_raster_lcdv( FT_Renderer  render,
                              FT_Outline*  outline,
    @@ -317,11 +406,42 @@
         FT_Vector*  vec;
     
         FT_Raster_Params  params;
    +    TOrigin           target;
     
     
    -    params.target = bitmap;
    -    params.source = outline;
    -    params.flags  = FT_RASTER_FLAG_AA;
    +    if ( render->root.library->lcd_weights[2] )
    +    {
    +      /* Set up direct rendering for instant filtering. */
    +      params.source     = outline;
    +      params.flags      = FT_RASTER_FLAG_AA | FT_RASTER_FLAG_DIRECT;
    +      params.gray_spans = ft_smooth_lcdv_spans;
    +      params.user       = ⌖
    +
    +      params.clip_box.xMin = 0;
    +      params.clip_box.yMin = 0;
    +      params.clip_box.xMax = bitmap->width;
    +      params.clip_box.yMax = bitmap->rows;
    +
    +      if ( bitmap->pitch < 0 )
    +        target.origin = bitmap->buffer;
    +      else
    +        target.origin = bitmap->buffer
    +                        + ( bitmap->rows - 1 ) * (unsigned int)bitmap->pitch;
    +
    +      target.pitch = bitmap->pitch;
    +
    +      target.wght[0] = render->root.library->lcd_weights[0];
    +      target.wght[1] = render->root.library->lcd_weights[1];
    +      target.wght[2] = render->root.library->lcd_weights[2];
    +      target.wght[3] = render->root.library->lcd_weights[3];
    +      target.wght[4] = render->root.library->lcd_weights[4];
    +    }
    +    else
    +    {
    +      params.target = bitmap;
    +      params.source = outline;
    +      params.flags  = FT_RASTER_FLAG_AA;
    +    }
     
         /* implode outline */
         for ( vec = points; vec < points_end; vec++ )
    @@ -494,12 +614,6 @@
         else
           y_shift += 64 * (FT_Int)bitmap->rows;
     
    -    if ( origin )
    -    {
    -      x_shift += origin->x;
    -      y_shift += origin->y;
    -    }
    -
         /* translate outline to render it into the bitmap */
         if ( x_shift || y_shift )
           FT_Outline_Translate( outline, x_shift, y_shift );
    @@ -527,33 +641,6 @@
             error = ft_smooth_raster_lcd ( render, outline, bitmap );
           else if ( mode == FT_RENDER_MODE_LCD_V )
             error = ft_smooth_raster_lcdv( render, outline, bitmap );
    -
    -#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING
    -
    -      /* finally apply filtering */
    -      {
    -        FT_Byte*                 lcd_weights;
    -        FT_Bitmap_LcdFilterFunc  lcd_filter_func;
    -
    -
    -        /* Per-face LCD filtering takes priority if set up. */
    -        if ( slot->face && slot->face->internal->lcd_filter_func )
    -        {
    -          lcd_weights     = slot->face->internal->lcd_weights;
    -          lcd_filter_func = slot->face->internal->lcd_filter_func;
    -        }
    -        else
    -        {
    -          lcd_weights     = slot->library->lcd_weights;
    -          lcd_filter_func = slot->library->lcd_filter_func;
    -        }
    -
    -        if ( lcd_filter_func )
    -          lcd_filter_func( bitmap, lcd_weights );
    -      }
    -
    -#endif /* FT_CONFIG_OPTION_SUBPIXEL_RENDERING */
    -
         }
     
       Exit:
    diff --git a/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.h b/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.h
    index d7b61a9e60e..f76708ae701 100644
    --- a/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.h
    +++ b/src/java.desktop/share/native/libfreetype/src/smooth/ftsmooth.h
    @@ -4,7 +4,7 @@
      *
      *   Anti-aliasing renderer interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.c
    index 4ab68eb9a12..6369d83d6d5 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.c
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType font driver implementation (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -220,12 +220,12 @@
         {
           /* Use 'kern' table if available since that can be faster; otherwise */
           /* use GPOS kerning pairs if available.                              */
    -      if ( ttface->kern_avail_bits != 0 )
    +      if ( ttface->kern_avail_bits )
             kerning->x = sfnt->get_kerning( ttface,
                                             left_glyph,
                                             right_glyph );
     #ifdef TT_CONFIG_OPTION_GPOS_KERNING
    -      else if ( ttface->gpos_kerning_available )
    +      else if ( ttface->num_gpos_lookups_kerning )
             kerning->x = sfnt->get_gpos_kerning( ttface,
                                                  left_glyph,
                                                  right_glyph );
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.h
    index 3e1cf234fcf..943eaae3482 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttdriver.h
    @@ -4,7 +4,7 @@
      *
      *   High-level TrueType driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/tterrors.h b/src/java.desktop/share/native/libfreetype/src/truetype/tterrors.h
    index 7ad937bd04d..631dbf5a80f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/tterrors.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/tterrors.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.c
    index b656ccf04e3..4dd68ab1019 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType Glyph Loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -660,7 +660,7 @@
         } while ( subglyph->flags & MORE_COMPONENTS );
     
         gloader->current.num_subglyphs = num_subglyphs;
    -    FT_TRACE5(( "  %d component%s\n",
    +    FT_TRACE5(( "  %u component%s\n",
                     num_subglyphs,
                     num_subglyphs > 1 ? "s" : "" ));
     
    @@ -674,7 +674,7 @@
           for ( i = 0; i < num_subglyphs; i++ )
           {
             if ( num_subglyphs > 1 )
    -          FT_TRACE7(( "    subglyph %d:\n", i ));
    +          FT_TRACE7(( "    subglyph %u:\n", i ));
     
             FT_TRACE7(( "      glyph index: %d\n", subglyph->index ));
     
    @@ -777,15 +777,11 @@
       TT_Hint_Glyph( TT_Loader  loader,
                      FT_Bool    is_composite )
       {
    -#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    TT_Face    face   = loader->face;
    -    TT_Driver  driver = (TT_Driver)FT_FACE_DRIVER( face );
    -#endif
    -
         TT_GlyphZone  zone = &loader->zone;
     
     #ifdef TT_USE_BYTECODE_INTERPRETER
         TT_ExecContext  exec  = loader->exec;
    +    TT_Size         size  = loader->size;
         FT_Long         n_ins = exec->glyphSize;
     #else
         FT_UNUSED( is_composite );
    @@ -797,9 +793,6 @@
         if ( n_ins > 0 )
           FT_ARRAY_COPY( zone->org, zone->cur, zone->n_points );
     
    -    /* Reset graphics state. */
    -    exec->GS = loader->size->GS;
    -
         /* XXX: UNDOCUMENTED! Hinting instructions of a composite glyph */
         /*      completely refer to the (already) hinted subglyphs.     */
         if ( is_composite )
    @@ -811,8 +804,8 @@
         }
         else
         {
    -      exec->metrics.x_scale = loader->size->metrics->x_scale;
    -      exec->metrics.y_scale = loader->size->metrics->y_scale;
    +      exec->metrics.x_scale = size->metrics->x_scale;
    +      exec->metrics.y_scale = size->metrics->y_scale;
         }
     #endif
     
    @@ -838,7 +831,7 @@
           exec->is_composite = is_composite;
           exec->pts          = *zone;
     
    -      error = TT_Run_Context( exec );
    +      error = TT_Run_Context( exec, size );
           if ( error && exec->pedantic_hinting )
             return error;
     
    @@ -854,8 +847,7 @@
         /* to change bearings or advance widths.                               */
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    if ( driver->interpreter_version == TT_INTERPRETER_VERSION_40 &&
    -         exec->backward_compatibility )
    +    if ( exec->backward_compatibility )
           return FT_Err_Ok;
     #endif
     
    @@ -1152,30 +1144,15 @@
             x = FT_MulFix( x, x_scale );
             y = FT_MulFix( y, y_scale );
     
    -        if ( subglyph->flags & ROUND_XY_TO_GRID )
    +        if ( subglyph->flags & ROUND_XY_TO_GRID &&
    +             IS_HINTED( loader->load_flags )    )
             {
    -          TT_Face    face   = loader->face;
    -          TT_Driver  driver = (TT_Driver)FT_FACE_DRIVER( face );
    +#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    +          if ( !loader->exec->backward_compatibility )
    +#endif
    +            x = FT_PIX_ROUND( x );
     
    -
    -          if ( IS_HINTED( loader->load_flags ) )
    -          {
    -            /*
    -             * We round the horizontal offset only if there is hinting along
    -             * the x axis; this corresponds to integer advance width values.
    -             *
    -             * Theoretically, a glyph's bytecode can toggle ClearType's
    -             * `backward compatibility' mode, which would allow modification
    -             * of the advance width.  In reality, however, applications
    -             * neither allow nor expect modified advance widths if subpixel
    -             * rendering is active.
    -             *
    -             */
    -            if ( driver->interpreter_version == TT_INTERPRETER_VERSION_35 )
    -              x = FT_PIX_ROUND( x );
    -
    -            y = FT_PIX_ROUND( y );
    -          }
    +          y = FT_PIX_ROUND( y );
             }
           }
         }
    @@ -1204,8 +1181,6 @@
       {
         FT_Error     error;
         FT_Outline*  outline = &loader->gloader->base.outline;
    -    FT_Stream    stream = loader->stream;
    -    FT_UShort    n_ins;
         FT_UInt      i;
     
     
    @@ -1224,8 +1199,10 @@
     #ifdef TT_USE_BYTECODE_INTERPRETER
     
         {
    -      TT_ExecContext  exec = loader->exec;
    +      TT_ExecContext  exec   = loader->exec;
           FT_Memory       memory = exec->memory;
    +      FT_Stream       stream = loader->stream;
    +      FT_UShort       n_ins;
     
     
           if ( exec->glyphSize )
    @@ -1378,8 +1355,9 @@
     
           if ( driver->interpreter_version == TT_INTERPRETER_VERSION_40 &&
                loader->exec                                             &&
    -           loader->exec->subpixel_hinting_lean                      &&
    -           loader->exec->grayscale_cleartype                        )
    +           loader->exec->mode != FT_RENDER_MODE_MONO                &&
    +           loader->exec->mode != FT_RENDER_MODE_LCD                 &&
    +           loader->exec->mode != FT_RENDER_MODE_LCD_V               )
           {
             loader->pp3.x = loader->advance / 2;
             loader->pp4.x = loader->advance / 2;
    @@ -1444,13 +1422,13 @@
     
     #ifdef FT_DEBUG_LEVEL_TRACE
         if ( recurse_count )
    -      FT_TRACE5(( "  nesting level: %d\n", recurse_count ));
    +      FT_TRACE5(( "  nesting level: %u\n", recurse_count ));
     #endif
     
         /* some fonts have an incorrect value of `maxComponentDepth' */
         if ( recurse_count > face->max_profile.maxComponentDepth )
         {
    -      FT_TRACE1(( "load_truetype_glyph: maxComponentDepth set to %d\n",
    +      FT_TRACE1(( "load_truetype_glyph: maxComponentDepth set to %u\n",
                       recurse_count ));
           face->max_profile.maxComponentDepth = (FT_UShort)recurse_count;
         }
    @@ -1566,18 +1544,18 @@
         if ( header_only )
           goto Exit;
     
    +#ifdef FT_CONFIG_OPTION_INCREMENTAL
    +    tt_get_metrics_incremental( loader, glyph_index );
    +#endif
    +    tt_loader_set_pp( loader );
    +
    +    /* shortcut for empty glyphs */
         if ( loader->byte_len == 0 || loader->n_contours == 0 )
         {
    -#ifdef FT_CONFIG_OPTION_INCREMENTAL
    -      tt_get_metrics_incremental( loader, glyph_index );
    -#endif
    -      tt_loader_set_pp( loader );
    -
     
     #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
     
    -      if ( FT_IS_NAMED_INSTANCE( FT_FACE( face ) ) ||
    -           FT_IS_VARIATION( FT_FACE( face ) )      )
    +      if ( !IS_DEFAULT_INSTANCE( FT_FACE( face ) ) )
           {
             /* a small outline structure with four elements for */
             /* communication with `TT_Vary_Apply_Glyph_Deltas'  */
    @@ -1627,11 +1605,6 @@
           goto Exit;
         }
     
    -#ifdef FT_CONFIG_OPTION_INCREMENTAL
    -    tt_get_metrics_incremental( loader, glyph_index );
    -#endif
    -    tt_loader_set_pp( loader );
    -
     
         /***********************************************************************/
         /***********************************************************************/
    @@ -1735,8 +1708,7 @@
     
     #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
     
    -      if ( FT_IS_NAMED_INSTANCE( FT_FACE( face ) ) ||
    -           FT_IS_VARIATION( FT_FACE( face ) )      )
    +      if ( !IS_DEFAULT_INSTANCE( FT_FACE( face ) ) )
           {
             FT_UShort    i, limit;
             FT_SubGlyph  subglyph;
    @@ -1953,6 +1925,9 @@
     
     #ifdef FT_CONFIG_OPTION_INCREMENTAL
     
    +    /* restore the original stream */
    +    loader->stream = face->root.stream;
    +
         if ( glyph_data_loaded )
           face->root.internal->incremental_interface->funcs->free_glyph_data(
             face->root.internal->incremental_interface->object,
    @@ -2112,7 +2087,6 @@
       {
         TT_Face             face   = (TT_Face)glyph->face;
         SFNT_Service        sfnt   = (SFNT_Service)face->sfnt;
    -    FT_Stream           stream = face->root.stream;
         FT_Error            error;
         TT_SBit_MetricsRec  sbit_metrics;
     
    @@ -2121,14 +2095,11 @@
                                        size->strike_index,
                                        glyph_index,
                                        (FT_UInt)load_flags,
    -                                   stream,
    +                                   face->root.stream,
                                        &glyph->bitmap,
                                        &sbit_metrics );
         if ( !error )
         {
    -      glyph->outline.n_points   = 0;
    -      glyph->outline.n_contours = 0;
    -
           glyph->metrics.width  = (FT_Pos)sbit_metrics.width  * 64;
           glyph->metrics.height = (FT_Pos)sbit_metrics.height * 64;
     
    @@ -2153,6 +2124,50 @@
             glyph->bitmap_top  = sbit_metrics.horiBearingY;
           }
         }
    +    /* a missing glyph in a bitmap-only font is assumed whitespace */
    +    /* that needs to be constructed using metrics data from `hmtx' */
    +    /* and, optionally, `vmtx' tables                              */
    +    else if ( FT_ERR_EQ( error, Missing_Bitmap ) &&
    +              !FT_IS_SCALABLE( glyph->face )     &&
    +              face->horz_metrics_size            )
    +    {
    +      FT_Fixed  x_scale = size->root.metrics.x_scale;
    +      FT_Fixed  y_scale = size->root.metrics.y_scale;
    +
    +      FT_Short  left_bearing = 0;
    +      FT_Short  top_bearing  = 0;
    +
    +      FT_UShort  advance_width  = 0;
    +      FT_UShort  advance_height = 0;
    +
    +
    +      TT_Get_HMetrics( face, glyph_index,
    +                       &left_bearing,
    +                       &advance_width );
    +      TT_Get_VMetrics( face, glyph_index,
    +                       0,
    +                       &top_bearing,
    +                       &advance_height );
    +
    +      glyph->metrics.width  = 0;
    +      glyph->metrics.height = 0;
    +
    +      glyph->metrics.horiBearingX = FT_MulFix( left_bearing, x_scale );
    +      glyph->metrics.horiBearingY = 0;
    +      glyph->metrics.horiAdvance  = FT_MulFix( advance_width, x_scale );
    +
    +      glyph->metrics.vertBearingX = 0;
    +      glyph->metrics.vertBearingY = FT_MulFix( top_bearing, y_scale );
    +      glyph->metrics.vertAdvance  = FT_MulFix( advance_height, y_scale );
    +
    +      glyph->format            = FT_GLYPH_FORMAT_BITMAP;
    +      glyph->bitmap.pixel_mode = FT_PIXEL_MODE_MONO;
    +
    +      glyph->bitmap_left = 0;
    +      glyph->bitmap_top  = 0;
    +
    +      error = FT_Err_Ok;
    +    }
     
         return error;
       }
    @@ -2168,15 +2183,6 @@
                       FT_Bool       glyf_table_only )
       {
         TT_Face    face   = (TT_Face)glyph->face;
    -    FT_Stream  stream = face->root.stream;
    -
    -#ifdef TT_USE_BYTECODE_INTERPRETER
    -    FT_Error   error;
    -    FT_Bool    pedantic = FT_BOOL( load_flags & FT_LOAD_PEDANTIC );
    -#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    TT_Driver  driver   = (TT_Driver)FT_FACE_DRIVER( glyph->face );
    -#endif
    -#endif
     
     
         FT_ZERO( loader );
    @@ -2186,122 +2192,80 @@
         /* load execution context */
         if ( IS_HINTED( load_flags ) && !glyf_table_only )
         {
    +      FT_Error        error;
           TT_ExecContext  exec;
    -      FT_Bool         grayscale = TRUE;
    +      FT_Render_Mode  mode      = FT_LOAD_TARGET_MODE( load_flags );
    +      FT_Bool         grayscale = FT_BOOL( mode != FT_RENDER_MODE_MONO );
    +      FT_Bool         reexecute = FALSE;
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -      FT_Bool         subpixel_hinting_lean;
    -      FT_Bool         grayscale_cleartype;
    +      TT_Driver       driver    = (TT_Driver)FT_FACE_DRIVER( glyph->face );
     #endif
     
    -      FT_Bool  reexecute = FALSE;
     
    -
    -      if ( size->bytecode_ready < 0 || size->cvt_ready < 0 )
    +      if ( size->bytecode_ready > 0 )
    +        return size->bytecode_ready;
    +      if ( size->bytecode_ready < 0 )
           {
    -        error = tt_size_ready_bytecode( size, pedantic );
    +        FT_Bool  pedantic = FT_BOOL( load_flags & FT_LOAD_PEDANTIC );
    +
    +
    +        error = tt_size_init_bytecode( size, pedantic );
             if ( error )
               return error;
           }
    -      else if ( size->bytecode_ready )
    -        return size->bytecode_ready;
    -      else if ( size->cvt_ready )
    -        return size->cvt_ready;
     
    -      /* query new execution context */
           exec = size->context;
    -      if ( !exec )
    -        return FT_THROW( Could_Not_Find_Context );
    -
    -      grayscale = FT_BOOL( FT_LOAD_TARGET_MODE( load_flags ) !=
    -                             FT_RENDER_MODE_MONO             );
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    +      /* reset backward compatibility; note that */
    +      /* the CVT program always runs without it  */
    +      exec->backward_compatibility = 0;
    +
           if ( driver->interpreter_version == TT_INTERPRETER_VERSION_40 )
           {
    -        subpixel_hinting_lean =
    -          FT_BOOL( FT_LOAD_TARGET_MODE( load_flags ) !=
    -                   FT_RENDER_MODE_MONO               );
    -        grayscale_cleartype =
    -          FT_BOOL( subpixel_hinting_lean         &&
    -                   !( ( load_flags         &
    -                        FT_LOAD_TARGET_LCD )   ||
    -                      ( load_flags           &
    -                        FT_LOAD_TARGET_LCD_V ) ) );
    -        exec->vertical_lcd_lean =
    -          FT_BOOL( subpixel_hinting_lean    &&
    -                   ( load_flags           &
    -                     FT_LOAD_TARGET_LCD_V ) );
    -        grayscale = FT_BOOL( grayscale && !subpixel_hinting_lean );
    -      }
    -      else
    -      {
    -        subpixel_hinting_lean   = FALSE;
    -        grayscale_cleartype     = FALSE;
    -        exec->vertical_lcd_lean = FALSE;
    -      }
    -#endif
    +        grayscale = FALSE;
     
    -      error = TT_Load_Context( exec, face, size );
    -      if ( error )
    -        return error;
    -
    -      {
    -#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -        if ( driver->interpreter_version == TT_INTERPRETER_VERSION_40 )
    +        /* any mode change requires a re-execution of the CVT program */
    +        if ( mode != exec->mode )
             {
    -          /* a change from mono to subpixel rendering (and vice versa) */
    -          /* requires a re-execution of the CVT program                */
    -          if ( subpixel_hinting_lean != exec->subpixel_hinting_lean )
    -          {
    -            FT_TRACE4(( "tt_loader_init: subpixel hinting change,"
    -                        " re-executing `prep' table\n" ));
    -
    -            exec->subpixel_hinting_lean = subpixel_hinting_lean;
    -            reexecute                   = TRUE;
    -          }
    -
    -          /* a change from colored to grayscale subpixel rendering (and */
    -          /* vice versa) requires a re-execution of the CVT program     */
    -          if ( grayscale_cleartype != exec->grayscale_cleartype )
    -          {
    -            FT_TRACE4(( "tt_loader_init: grayscale subpixel hinting change,"
    -                        " re-executing `prep' table\n" ));
    -
    -            exec->grayscale_cleartype = grayscale_cleartype;
    -            reexecute                 = TRUE;
    -          }
    -        }
    -#endif
    -
    -        /* a change from mono to grayscale rendering (and vice versa) */
    -        /* requires a re-execution of the CVT program                 */
    -        if ( grayscale != exec->grayscale )
    -        {
    -          FT_TRACE4(( "tt_loader_init: grayscale hinting change,"
    +          FT_TRACE4(( "tt_loader_init: render mode change,"
                           " re-executing `prep' table\n" ));
     
    -          exec->grayscale = grayscale;
    -          reexecute       = TRUE;
    +          exec->mode = mode;
    +          reexecute  = TRUE;
             }
           }
    +#endif
     
    -      if ( reexecute )
    +      /* a change from mono to grayscale rendering (and vice versa) */
    +      /* requires a re-execution of the CVT program                 */
    +      if ( grayscale != exec->grayscale )
           {
    -        error = tt_size_run_prep( size, pedantic );
    -        if ( error )
    -          return error;
    -        error = TT_Load_Context( exec, face, size );
    +        FT_TRACE4(( "tt_loader_init: grayscale hinting change,"
    +                    " re-executing `prep' table\n" ));
    +
    +        exec->grayscale = grayscale;
    +        reexecute       = TRUE;
    +      }
    +
    +      if ( size->cvt_ready > 0 )
    +        return size->cvt_ready;
    +      if ( size->cvt_ready < 0 || reexecute )
    +      {
    +        error = tt_size_run_prep( size );
             if ( error )
               return error;
           }
     
    +      TT_Load_Context( exec, face, size );
    +
           /* check whether the cvt program has disabled hinting */
    -      if ( exec->GS.instruct_control & 1 )
    +      if ( size->GS.instruct_control & 1 )
             load_flags |= FT_LOAD_NO_HINTING;
     
    -      /* load default graphics state -- if needed */
    -      if ( exec->GS.instruct_control & 2 )
    -        exec->GS = tt_default_graphics_state;
    +      /* check whether GS modifications should be reverted */
    +      if ( size->GS.instruct_control & 2 )
    +        size->GS = tt_default_graphics_state;
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
           /*
    @@ -2318,28 +2282,24 @@
            *
            */
           if ( driver->interpreter_version == TT_INTERPRETER_VERSION_40 &&
    -           subpixel_hinting_lean                                    &&
    +           mode != FT_RENDER_MODE_MONO                              &&
                !FT_IS_TRICKY( glyph->face )                             )
    -        exec->backward_compatibility = !( exec->GS.instruct_control & 4 );
    -      else
    -        exec->backward_compatibility = FALSE;
    +        exec->backward_compatibility = ( size->GS.instruct_control & 4 ) ^ 4;
    +
     #endif /* TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL */
     
    -      exec->pedantic_hinting = FT_BOOL( load_flags & FT_LOAD_PEDANTIC );
           loader->exec = exec;
    -      loader->instructions = exec->glyphIns;
     
           /* Use the hdmx table if any unless FT_LOAD_COMPUTE_METRICS */
           /* is set or backward compatibility mode of the v38 or v40  */
           /* interpreters is active.  See `ttinterp.h' for details on */
           /* backward compatibility mode.                             */
    -      if ( IS_HINTED( loader->load_flags )                                &&
    -           !( loader->load_flags & FT_LOAD_COMPUTE_METRICS )              &&
    +      if ( IS_HINTED( load_flags )                   &&
    +           !( load_flags & FT_LOAD_COMPUTE_METRICS ) &&
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -           !( driver->interpreter_version == TT_INTERPRETER_VERSION_40  &&
    -              exec->backward_compatibility                              ) &&
    +           !exec->backward_compatibility             &&
     #endif
    -           !face->postscript.isFixedPitch                                 )
    +           !face->postscript.isFixedPitch            )
           {
             loader->widthp = size->widthp;
           }
    @@ -2364,7 +2324,7 @@
         loader->face   = face;
         loader->size   = size;
         loader->glyph  = (FT_GlyphSlot)glyph;
    -    loader->stream = stream;
    +    loader->stream = face->root.stream;
     
         loader->composites.head = NULL;
         loader->composites.tail = NULL;
    @@ -2426,84 +2386,26 @@
         TT_LoaderRec  loader;
     
     
    -    FT_TRACE1(( "TT_Load_Glyph: glyph index %d\n", glyph_index ));
    +    FT_TRACE1(( "TT_Load_Glyph: glyph index %u\n", glyph_index ));
     
     #ifdef TT_CONFIG_OPTION_EMBEDDED_BITMAPS
     
         /* try to load embedded bitmap (if any) */
    -    if ( size->strike_index != 0xFFFFFFFFUL      &&
    -         ( load_flags & FT_LOAD_NO_BITMAP ) == 0 &&
    -         IS_DEFAULT_INSTANCE( glyph->face )      )
    +    if ( size->strike_index != 0xFFFFFFFFUL  &&
    +         !( load_flags & FT_LOAD_NO_BITMAP &&
    +            FT_IS_SCALABLE( glyph->face )  ) &&
    +         IS_DEFAULT_INSTANCE( glyph->face )  )
         {
    -      FT_Fixed  x_scale = size->root.metrics.x_scale;
    -      FT_Fixed  y_scale = size->root.metrics.y_scale;
    -
    -
           error = load_sbit_image( size, glyph, glyph_index, load_flags );
    -      if ( FT_ERR_EQ( error, Missing_Bitmap ) )
    -      {
    -        /* the bitmap strike is incomplete and misses the requested glyph; */
    -        /* if we have a bitmap-only font, return an empty glyph            */
    -        if ( !FT_IS_SCALABLE( glyph->face ) )
    -        {
    -          FT_Short  left_bearing = 0;
    -          FT_Short  top_bearing  = 0;
    -
    -          FT_UShort  advance_width  = 0;
    -          FT_UShort  advance_height = 0;
    -
    -
    -          /* to return an empty glyph, however, we need metrics data   */
    -          /* from the `hmtx' (or `vmtx') table; the assumption is that */
    -          /* empty glyphs are missing intentionally, representing      */
    -          /* whitespace - not having at least horizontal metrics is    */
    -          /* thus considered an error                                  */
    -          if ( !face->horz_metrics_size )
    -            return error;
    -
    -          /* we now construct an empty bitmap glyph */
    -          TT_Get_HMetrics( face, glyph_index,
    -                           &left_bearing,
    -                           &advance_width );
    -          TT_Get_VMetrics( face, glyph_index,
    -                           0,
    -                           &top_bearing,
    -                           &advance_height );
    -
    -          glyph->outline.n_points   = 0;
    -          glyph->outline.n_contours = 0;
    -
    -          glyph->metrics.width  = 0;
    -          glyph->metrics.height = 0;
    -
    -          glyph->metrics.horiBearingX = FT_MulFix( left_bearing, x_scale );
    -          glyph->metrics.horiBearingY = 0;
    -          glyph->metrics.horiAdvance  = FT_MulFix( advance_width, x_scale );
    -
    -          glyph->metrics.vertBearingX = 0;
    -          glyph->metrics.vertBearingY = FT_MulFix( top_bearing, y_scale );
    -          glyph->metrics.vertAdvance  = FT_MulFix( advance_height, y_scale );
    -
    -          glyph->format            = FT_GLYPH_FORMAT_BITMAP;
    -          glyph->bitmap.pixel_mode = FT_PIXEL_MODE_MONO;
    -
    -          glyph->bitmap_left = 0;
    -          glyph->bitmap_top  = 0;
    -
    -          return FT_Err_Ok;
    -        }
    -      }
    -      else if ( error )
    -      {
    -        /* return error if font is not scalable */
    -        if ( !FT_IS_SCALABLE( glyph->face ) )
    -          return error;
    -      }
    -      else
    +      if ( !error )
           {
             if ( FT_IS_SCALABLE( glyph->face ) ||
                  FT_HAS_SBIX( glyph->face )    )
             {
    +          FT_Fixed  x_scale = size->root.metrics.x_scale;
    +          FT_Fixed  y_scale = size->root.metrics.y_scale;
    +
    +
               /* for the bbox we need the header only */
               (void)tt_loader_init( &loader, size, glyph, load_flags, TRUE );
               (void)load_truetype_glyph( &loader, glyph_index, 0, TRUE );
    @@ -2550,8 +2452,10 @@
                                                         y_scale );
             }
     
    -        return FT_Err_Ok;
    +        goto Exit;
           }
    +      else if ( !FT_IS_SCALABLE( glyph->face ) )
    +        goto Exit;
         }
     
         if ( load_flags & FT_LOAD_SBITS_ONLY )
    @@ -2563,7 +2467,7 @@
     #endif /* TT_CONFIG_OPTION_EMBEDDED_BITMAPS */
     
         /* if FT_LOAD_NO_SCALE is not set, `ttmetrics' must be valid */
    -    if ( !( load_flags & FT_LOAD_NO_SCALE ) && !size->ttmetrics.valid )
    +    if ( !( load_flags & FT_LOAD_NO_SCALE ) && !size->ttmetrics.ppem )
         {
           error = FT_THROW( Invalid_Size_Handle );
           goto Exit;
    @@ -2614,7 +2518,7 @@
             glyph->metrics.horiAdvance = FT_MulFix( advanceX, x_scale );
             glyph->metrics.vertAdvance = FT_MulFix( advanceY, y_scale );
     
    -        return error;
    +        goto Exit;
           }
     
           FT_TRACE3(( "Failed to load SVG glyph\n" ));
    @@ -2642,10 +2546,6 @@
           goto Done;
         }
     
    -    glyph->format        = FT_GLYPH_FORMAT_OUTLINE;
    -    glyph->num_subglyphs = 0;
    -    glyph->outline.flags = 0;
    -
         /* main loading loop */
         error = load_truetype_glyph( &loader, glyph_index, 0, FALSE );
         if ( !error )
    @@ -2657,9 +2557,18 @@
           }
           else
           {
    +        glyph->format         = FT_GLYPH_FORMAT_OUTLINE;
    +
             glyph->outline        = loader.gloader->base.outline;
             glyph->outline.flags &= ~FT_OUTLINE_SINGLE_PASS;
     
    +        /* Set the `high precision' bit flag.  This is _critical_ to   */
    +        /* get correct output for monochrome TrueType glyphs at all    */
    +        /* sizes using the bytecode interpreter.                       */
    +        if ( !( load_flags & FT_LOAD_NO_SCALE ) &&
    +             size->metrics->y_ppem < 24         )
    +          glyph->outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    +
             /* Translate array so that (0,0) is the glyph's origin.  Note  */
             /* that this behaviour is independent on the value of bit 1 of */
             /* the `flags' field in the `head' table -- at least major     */
    @@ -2708,14 +2617,6 @@
           error = compute_glyph_metrics( &loader, glyph_index );
         }
     
    -    /* Set the `high precision' bit flag.                           */
    -    /* This is _critical_ to get correct output for monochrome      */
    -    /* TrueType glyphs at all sizes using the bytecode interpreter. */
    -    /*                                                              */
    -    if ( !( load_flags & FT_LOAD_NO_SCALE ) &&
    -         size->metrics->y_ppem < 24         )
    -      glyph->outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    -
         FT_TRACE1(( "  subglyphs = %u, contours = %hu, points = %hu,"
                     " flags = 0x%.3x\n",
                     loader.gloader->base.num_subglyphs,
    @@ -2727,11 +2628,8 @@
         tt_loader_done( &loader );
     
       Exit:
    -#ifdef FT_DEBUG_LEVEL_TRACE
    -    if ( error )
    -      FT_TRACE1(( "  failed (error code 0x%x)\n",
    -                  error ));
    -#endif
    +    FT_TRACE1(( error ? "  failed (error code 0x%x)\n" : "",
    +                error ));
     
         return error;
       }
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.h
    index 22ea967f301..39d6ae3664c 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttgload.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType Glyph Loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.c
    index 4f0083c96b7..f8842795f14 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.c
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType GX Font Variation loader
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg, and George Williams.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -489,8 +489,9 @@
         FT_UShort  axis_count;
         FT_UInt    region_count;
     
    -    FT_UInt  i, j;
    -    FT_Bool  long_words;
    +    FT_UInt   i, j;
    +    FT_Byte*  bytes;
    +    FT_Bool   long_words;
     
         GX_Blend   blend           = ttface->blend;
         FT_ULong*  dataOffsetArray = NULL;
    @@ -526,11 +527,15 @@
         if ( FT_QNEW_ARRAY( dataOffsetArray, data_count ) )
           goto Exit;
     
    +    if ( FT_FRAME_ENTER( data_count * 4 ) )
    +      goto Exit;
    +
    +    bytes = stream->cursor;
    +
         for ( i = 0; i < data_count; i++ )
    -    {
    -      if ( FT_READ_ULONG( dataOffsetArray[i] ) )
    -        goto Exit;
    -    }
    +      dataOffsetArray[i] = FT_NEXT_ULONG( bytes );
    +
    +    FT_FRAME_EXIT();
     
         /* parse array of region records (region list) */
         if ( FT_STREAM_SEEK( offset + region_offset ) )
    @@ -564,13 +569,26 @@
           goto Exit;
         itemStore->regionCount = region_count;
     
    -    for ( i = 0; i < itemStore->regionCount; i++ )
    +    if ( FT_FRAME_ENTER( (FT_Long)region_count * axis_count * 6 ) )
    +    {
    +      FT_TRACE2(( "tt_var_load_item_variation_store:"
    +                  " not enough data for variation regions\n" ));
    +      error = FT_THROW( Invalid_Table );
    +      goto Exit;
    +    }
    +
    +    bytes = stream->cursor;
    +
    +    for ( i = 0; i < region_count; i++ )
         {
           GX_AxisCoords  axisCoords;
     
     
           if ( FT_NEW_ARRAY( itemStore->varRegionList[i].axisList, axis_count ) )
    +      {
    +        FT_FRAME_EXIT();
             goto Exit;
    +      }
     
           axisCoords = itemStore->varRegionList[i].axisList;
     
    @@ -579,10 +597,9 @@
             FT_Int  start, peak, end;
     
     
    -        if ( FT_READ_SHORT( start ) ||
    -             FT_READ_SHORT( peak )  ||
    -             FT_READ_SHORT( end )   )
    -          goto Exit;
    +        start = FT_NEXT_SHORT( bytes );
    +        peak  = FT_NEXT_SHORT( bytes );
    +        end   = FT_NEXT_SHORT( bytes );
     
             /* immediately tag invalid ranges with special peak = 0 */
             if ( ( start < 0 && end > 0 ) || start > peak || peak > end )
    @@ -594,6 +611,8 @@
           }
         }
     
    +    FT_FRAME_EXIT();
    +
         /* end of region list parse */
     
         /* use dataOffsetArray now to parse varData items */
    @@ -625,7 +644,7 @@
           /* check some data consistency */
           if ( word_delta_count > region_idx_count )
           {
    -        FT_TRACE2(( "bad short count %d or region count %d\n",
    +        FT_TRACE2(( "bad short count %d or region count %u\n",
                         word_delta_count,
                         region_idx_count ));
             error = FT_THROW( Invalid_Table );
    @@ -634,7 +653,7 @@
     
           if ( region_idx_count > itemStore->regionCount )
           {
    -        FT_TRACE2(( "inconsistent regionCount %d in varData[%d]\n",
    +        FT_TRACE2(( "inconsistent regionCount %u in varData[%u]\n",
                         region_idx_count,
                         i ));
             error = FT_THROW( Invalid_Table );
    @@ -648,29 +667,39 @@
           varData->wordDeltaCount = word_delta_count;
           varData->longWords      = long_words;
     
    +      if ( FT_FRAME_ENTER( region_idx_count * 2 ) )
    +      {
    +        FT_TRACE2(( "tt_var_load_item_variation_store:"
    +                    " not enough data for region indices\n" ));
    +        error = FT_THROW( Invalid_Table );
    +        goto Exit;
    +      }
    +
    +      bytes = stream->cursor;
    +
           for ( j = 0; j < varData->regionIdxCount; j++ )
           {
    -        if ( FT_READ_USHORT( varData->regionIndices[j] ) )
    -          goto Exit;
    +        varData->regionIndices[j] = FT_NEXT_USHORT( bytes );
     
             if ( varData->regionIndices[j] >= itemStore->regionCount )
             {
    -          FT_TRACE2(( "bad region index %d\n",
    +          FT_TRACE2(( "bad region index %u\n",
                           varData->regionIndices[j] ));
    +          FT_FRAME_EXIT();
               error = FT_THROW( Invalid_Table );
               goto Exit;
             }
           }
     
    +      FT_FRAME_EXIT();
    +
           per_region_size = word_delta_count + region_idx_count;
           if ( long_words )
             per_region_size *= 2;
     
    -      if ( FT_NEW_ARRAY( varData->deltaSet, per_region_size * item_count ) )
    +      if ( FT_QALLOC_MULT( varData->deltaSet, item_count, per_region_size ) )
             goto Exit;
    -      if ( FT_Stream_Read( stream,
    -                           varData->deltaSet,
    -                           per_region_size * item_count ) )
    +      if ( FT_STREAM_READ( varData->deltaSet, item_count * per_region_size ) )
           {
             FT_TRACE2(( "deltaSet read failed." ));
             error = FT_THROW( Invalid_Table );
    @@ -706,6 +735,7 @@
         FT_UInt   innerIndexMask;
         FT_ULong  i;
         FT_UInt   j;
    +    FT_Byte*  bytes;
     
     
         if ( FT_STREAM_SEEK( offset )    ||
    @@ -757,6 +787,16 @@
         if ( FT_NEW_ARRAY( map->outerIndex, map->mapCount ) )
           goto Exit;
     
    +    if ( FT_FRAME_ENTER( map->mapCount * entrySize ) )
    +    {
    +      FT_TRACE2(( "tt_var_load_delta_set_index_mapping:"
    +                  " invalid number of delta-set index mappings\n" ));
    +      error = FT_THROW( Invalid_Table );
    +      goto Exit;
    +    }
    +
    +    bytes = stream->cursor;
    +
         for ( i = 0; i < map->mapCount; i++ )
         {
           FT_UInt  mapData = 0;
    @@ -769,9 +809,7 @@
             FT_Byte  data;
     
     
    -        if ( FT_READ_BYTE( data ) )
    -          goto Exit;
    -
    +        data    = FT_NEXT_BYTE( bytes );
             mapData = ( mapData << 8 ) | data;
           }
     
    @@ -789,7 +827,7 @@
     
           if ( outerIndex >= itemStore->dataCount )
           {
    -        FT_TRACE2(( "outerIndex[%ld] == %d out of range\n",
    +        FT_TRACE2(( "outerIndex[%lu] == %u out of range\n",
                         i,
                         outerIndex ));
             error = FT_THROW( Invalid_Table );
    @@ -802,7 +840,7 @@
     
           if ( innerIndex >= itemStore->varData[outerIndex].itemCount )
           {
    -        FT_TRACE2(( "innerIndex[%ld] == %d out of range\n",
    +        FT_TRACE2(( "innerIndex[%lu] == %u out of range\n",
                         i,
                         innerIndex ));
             error = FT_THROW( Invalid_Table );
    @@ -812,6 +850,8 @@
           map->innerIndex[i] = innerIndex;
         }
     
    +    FT_FRAME_EXIT();
    +
       Exit:
         return error;
       }
    @@ -965,28 +1005,181 @@
       }
     
     
    +  static FT_Fixed
    +  tt_calculate_scalar( GX_AxisCoords  axis,
    +                       FT_UInt        axisCount,
    +                       FT_Fixed*      normalizedcoords )
    +  {
    +    FT_Fixed  scalar = 0x10000L;
    +    FT_UInt   j;
    +
    +
    +    /* Inner loop steps through axes in this region. */
    +    for ( j = 0; j < axisCount; j++, axis++ )
    +    {
    +      FT_Fixed  ncv = normalizedcoords[j];
    +
    +
    +      /* Compute the scalar contribution of this axis, */
    +      /* with peak of 0 used for invalid axes.         */
    +      if ( axis->peakCoord == ncv ||
    +           axis->peakCoord == 0   )
    +        continue;
    +
    +      /* Ignore this region if coordinates are out of range. */
    +      else if ( ncv <= axis->startCoord ||
    +                ncv >= axis->endCoord   )
    +      {
    +        scalar = 0;
    +        break;
    +      }
    +
    +      /* Cumulative product of all the axis scalars. */
    +      else if ( ncv < axis->peakCoord )
    +        scalar = FT_MulDiv( scalar,
    +                            ncv - axis->startCoord,
    +                            axis->peakCoord - axis->startCoord );
    +      else   /* ncv > axis->peakCoord */
    +        scalar = FT_MulDiv( scalar,
    +                            axis->endCoord - ncv,
    +                            axis->endCoord - axis->peakCoord );
    +
    +    } /* per-axis loop */
    +
    +    return scalar;
    +  }
    +
    +
    +  static FT_Int64
    +  ft_mul_add_delta_scalar( FT_Int64  returnValue,
    +                           FT_Int32  delta,
    +                           FT_Int32  scalar )
    +  {
    +
    +#ifdef FT_INT64
    +
    +    return returnValue + (FT_Int64)delta * scalar;
    +
    +#else /* !FT_INT64 */
    +
    +    if ( (FT_UInt32)( delta + 0x8000 ) <= 0x20000 )
    +    {
    +      /* Fast path: multiplication result fits into 32 bits. */
    +
    +      FT_Int32  lo = delta * scalar;
    +
    +
    +      returnValue.lo += (FT_UInt32)lo;
    +
    +      if ( returnValue.lo < (FT_UInt32)lo )
    +        returnValue.hi += ( lo < 0 ) ? 0 : 1;
    +
    +      if ( lo < 0 )
    +        returnValue.hi -= 1;
    +
    +      return returnValue;
    +    }
    +    else
    +    {
    +      /* Slow path: full 32x32 -> 64-bit signed multiplication. */
    +
    +      FT_Int64 product;
    +
    +      /* Get absolute values. */
    +      FT_UInt32  a = ( delta < 0 ) ? -delta : delta;
    +      FT_UInt32  b = ( scalar < 0 ) ? -scalar : scalar;
    +
    +      /* Prepare unsigned multiplication. */
    +      FT_UInt32  a_lo = a & 0xFFFF;
    +      FT_UInt32  a_hi = a >> 16;
    +
    +      FT_UInt32  b_lo = b & 0xFFFF;
    +      FT_UInt32  b_hi = b >> 16;
    +
    +      /* Partial products. */
    +      FT_UInt32  p0 = a_lo * b_lo;
    +      FT_UInt32  p1 = a_lo * b_hi;
    +      FT_UInt32  p2 = a_hi * b_lo;
    +      FT_UInt32  p3 = a_hi * b_hi;
    +
    +      /* Combine: result = p3 << 32 + (p1 + p2) << 16 + p0 */
    +      FT_UInt32  mid       = p1 + p2;
    +      FT_UInt32  mid_carry = ( mid < p1 );
    +
    +      FT_UInt32  carry;
    +
    +
    +      product.lo = ( mid << 16 ) + ( p0 & 0xFFFF );
    +      carry      = ( product.lo < ( p0 & 0xFFFF ) ) ? 1 : 0;
    +      product.hi = p3 + ( mid >> 16 ) + mid_carry + carry;
    +
    +      /* If result should be negative, negate. */
    +      if ( ( delta < 0 ) ^ ( scalar < 0 ) )
    +      {
    +        product.lo = ~product.lo + 1;
    +        product.hi = ~product.hi + ( product.lo == 0 ? 1 : 0 );
    +      }
    +
    +      /* Add to `returnValue`. */
    +      returnValue.lo += product.lo;
    +      if ( returnValue.lo < product.lo )
    +        returnValue.hi++;
    +      returnValue.hi += product.hi;
    +
    +      return returnValue;
    +    }
    +
    +#endif /* !FT_INT64 */
    +
    +  }
    +
    +
    +  static FT_ItemVarDelta
    +  ft_round_and_shift16( FT_Int64  returnValue )
    +  {
    +
    +#ifdef FT_INT64
    +
    +    return (FT_ItemVarDelta)( returnValue + 0x8000L ) >> 16;
    +
    +#else /* !FT_INT64 */
    +
    +    FT_UInt hi = returnValue.hi;
    +    FT_UInt lo = returnValue.lo;
    +
    +    FT_UInt delta;
    +
    +
    +    /* Add 0x8000 to round. */
    +    lo += 0x8000;
    +    if ( lo < 0x8000 )  /* overflow occurred */
    +      hi += 1;
    +
    +    /* Shift right by 16 bits. */
    +    delta = ( hi << 16 ) | ( lo >> 16 );
    +
    +    return (FT_ItemVarDelta)delta;
    +
    +#endif /* !FT_INT64 */
    +
    +  }
    +
    +
       FT_LOCAL_DEF( FT_ItemVarDelta )
       tt_var_get_item_delta( FT_Face          face,        /* TT_Face */
                              GX_ItemVarStore  itemStore,
                              FT_UInt          outerIndex,
                              FT_UInt          innerIndex )
       {
    -    TT_Face    ttface = (TT_Face)face;
    -    FT_Stream  stream = FT_FACE_STREAM( face );
    -    FT_Memory  memory = stream->memory;
    -    FT_Error   error  = FT_Err_Ok;
    +    TT_Face  ttface = (TT_Face)face;
     
    -    GX_ItemVarData    varData;
    -    FT_ItemVarDelta*  deltaSet = NULL;
    -    FT_ItemVarDelta   deltaSetStack[16];
    +    GX_ItemVarData  varData;
     
    -    FT_Fixed*  scalars = NULL;
    -    FT_Fixed   scalarsStack[16];
    -
    -    FT_UInt          master, j;
    -    FT_ItemVarDelta  returnValue = 0;
    -    FT_UInt          per_region_size;
    -    FT_Byte*         bytes;
    +    FT_UInt   master;
    +    FT_Int64  returnValue = FT_INT64_ZERO;
    +    FT_UInt   shift_base  = 1;
    +    FT_UInt   per_region_size;
    +    FT_Byte*  bytes;
     
     
         if ( !ttface->blend || !ttface->blend->normalizedcoords )
    @@ -1011,113 +1204,63 @@
         if ( varData->regionIdxCount == 0 )
           return 0; /* Avoid "applying zero offset to null pointer". */
     
    -    if ( varData->regionIdxCount < 16 )
    -    {
    -      deltaSet = deltaSetStack;
    -      scalars  = scalarsStack;
    -    }
    -    else
    -    {
    -      if ( FT_QNEW_ARRAY( deltaSet, varData->regionIdxCount ) )
    -        goto Exit;
    -      if ( FT_QNEW_ARRAY( scalars, varData->regionIdxCount ) )
    -        goto Exit;
    -    }
    -
         /* Parse delta set.                                            */
         /*                                                             */
         /* Deltas are (word_delta_count + region_idx_count) bytes each */
         /* if `longWords` isn't set, and twice as much otherwise.      */
         per_region_size = varData->wordDeltaCount + varData->regionIdxCount;
         if ( varData->longWords )
    +    {
    +      shift_base       = 2;
           per_region_size *= 2;
    +    }
     
         bytes = varData->deltaSet + per_region_size * innerIndex;
     
    -    if ( varData->longWords )
    -    {
    -      for ( master = 0; master < varData->wordDeltaCount; master++ )
    -        deltaSet[master] = FT_NEXT_LONG( bytes );
    -      for ( ; master < varData->regionIdxCount; master++ )
    -        deltaSet[master] = FT_NEXT_SHORT( bytes );
    -    }
    -    else
    -    {
    -      for ( master = 0; master < varData->wordDeltaCount; master++ )
    -        deltaSet[master] = FT_NEXT_SHORT( bytes );
    -      for ( ; master < varData->regionIdxCount; master++ )
    -        deltaSet[master] = FT_NEXT_CHAR( bytes );
    -    }
    -
         /* outer loop steps through master designs to be blended */
         for ( master = 0; master < varData->regionIdxCount; master++ )
         {
    -      FT_Fixed  scalar      = 0x10000L;
    -      FT_UInt   regionIndex = varData->regionIndices[master];
    +      FT_UInt  regionIndex = varData->regionIndices[master];
     
           GX_AxisCoords  axis = itemStore->varRegionList[regionIndex].axisList;
     
    +      FT_Fixed  scalar = tt_calculate_scalar(
    +                           axis,
    +                           itemStore->axisCount,
    +                           ttface->blend->normalizedcoords );
     
    -      /* inner loop steps through axes in this region */
    -      for ( j = 0; j < itemStore->axisCount; j++, axis++ )
    +
    +      if ( scalar )
           {
    -        FT_Fixed  ncv = ttface->blend->normalizedcoords[j];
    +        FT_Int  delta;
     
     
    -        /* compute the scalar contribution of this axis */
    -        /* with peak of 0 used for invalid axes         */
    -        if ( axis->peakCoord == ncv ||
    -             axis->peakCoord == 0   )
    -          continue;
    -
    -        /* ignore this region if coords are out of range */
    -        else if ( ncv <= axis->startCoord ||
    -                  ncv >= axis->endCoord   )
    +        if ( varData->longWords )
             {
    -          scalar = 0;
    -          break;
    +          if ( master < varData->wordDeltaCount )
    +            delta = FT_NEXT_LONG( bytes );
    +          else
    +            delta = FT_NEXT_SHORT( bytes );
    +        }
    +        else
    +        {
    +          if ( master < varData->wordDeltaCount )
    +            delta = FT_NEXT_SHORT( bytes );
    +          else
    +            delta = FT_NEXT_CHAR( bytes );
             }
     
    -        /* cumulative product of all the axis scalars */
    -        else if ( ncv < axis->peakCoord )
    -          scalar = FT_MulDiv( scalar,
    -                              ncv - axis->startCoord,
    -                              axis->peakCoord - axis->startCoord );
    -        else   /* ncv > axis->peakCoord */
    -          scalar = FT_MulDiv( scalar,
    -                              axis->endCoord - ncv,
    -                              axis->endCoord - axis->peakCoord );
    -
    -      } /* per-axis loop */
    -
    -      scalars[master] = scalar;
    +        returnValue = ft_mul_add_delta_scalar( returnValue, delta, scalar );
    +      }
    +      else
    +      {
    +        /* Branch-free, yay. */
    +        bytes += shift_base << ( master < varData->wordDeltaCount );
    +      }
     
         } /* per-region loop */
     
    -
    -    /* Compute the scaled delta for this region.
    -     *
    -     * From: https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#item-variation-store-header-and-item-variation-data-subtables:
    -     *
    -     *   `Fixed` is a 32-bit (16.16) type and, in the general case, requires
    -     *   32-bit deltas.  As described above, the `DeltaSet` record can
    -     *   accommodate deltas that are, logically, either 16-bit or 32-bit.
    -     *   When scaled deltas are applied to `Fixed` values, the `Fixed` value
    -     *   is treated like a 32-bit integer.
    -     *
    -     * `FT_MulAddFix` internally uses 64-bit precision; it thus can handle
    -     * deltas ranging from small 8-bit to large 32-bit values that are
    -     * applied to 16.16 `FT_Fixed` / OpenType `Fixed` values.
    -     */
    -    returnValue = FT_MulAddFix( scalars, deltaSet, varData->regionIdxCount );
    -
    -  Exit:
    -    if ( scalars != scalarsStack )
    -      FT_FREE( scalars );
    -    if ( deltaSet != deltaSetStack )
    -      FT_FREE( deltaSet );
    -
    -    return returnValue;
    +    return ft_round_and_shift16( returnValue );
       }
     
     
    @@ -1643,6 +1786,7 @@
         GX_Blend      blend  = face->blend;
         FT_Error      error;
         FT_UInt       i, j;
    +    FT_Byte*      bytes;
         FT_ULong      table_len;
         FT_ULong      gvar_start;
         FT_ULong      offsetToData;
    @@ -1734,6 +1878,8 @@
         if ( FT_FRAME_ENTER( offsets_len ) )
           goto Exit;
     
    +    bytes = stream->cursor;
    +
         /* offsets (one more offset than glyphs, to mark size of last) */
         if ( FT_QNEW_ARRAY( blend->glyphoffsets, gvar_head.glyphCount + 1 ) )
           goto Fail2;
    @@ -1744,16 +1890,24 @@
           FT_ULong  max_offset = 0;
     
     
    +      if ( stream->limit - stream->cursor < gvar_head.glyphCount * 4 )
    +      {
    +        FT_TRACE2(( "ft_var_load_gvar:"
    +                    " glyph variation data offset not enough\n" ));
    +        error = FT_THROW( Invalid_Table );
    +        goto Fail;
    +      }
    +
           for ( i = 0; i <= gvar_head.glyphCount; i++ )
           {
    -        blend->glyphoffsets[i] = offsetToData + FT_GET_ULONG();
    +        blend->glyphoffsets[i] = offsetToData + FT_NEXT_ULONG( bytes );
     
             if ( max_offset <= blend->glyphoffsets[i] )
               max_offset = blend->glyphoffsets[i];
             else
             {
               FT_TRACE2(( "ft_var_load_gvar:"
    -                      " glyph variation data offset %d not monotonic\n",
    +                      " glyph variation data offset %u not monotonic\n",
                           i ));
               blend->glyphoffsets[i] = max_offset;
             }
    @@ -1762,7 +1916,7 @@
             if ( limit < blend->glyphoffsets[i] )
             {
               FT_TRACE2(( "ft_var_load_gvar:"
    -                      " glyph variation data offset %d out of range\n",
    +                      " glyph variation data offset %u out of range\n",
                           i ));
               blend->glyphoffsets[i] = limit;
             }
    @@ -1774,16 +1928,24 @@
           FT_ULong  max_offset = 0;
     
     
    +      if ( stream->limit - stream->cursor < gvar_head.glyphCount * 2 )
    +      {
    +        FT_TRACE2(( "ft_var_load_gvar:"
    +                    " glyph variation data offset not enough\n" ));
    +        error = FT_THROW( Invalid_Table );
    +        goto Fail;
    +      }
    +
           for ( i = 0; i <= gvar_head.glyphCount; i++ )
           {
    -        blend->glyphoffsets[i] = offsetToData + FT_GET_USHORT() * 2;
    +        blend->glyphoffsets[i] = offsetToData + FT_NEXT_USHORT( bytes ) * 2;
     
             if ( max_offset <= blend->glyphoffsets[i] )
               max_offset = blend->glyphoffsets[i];
             else
             {
               FT_TRACE2(( "ft_var_load_gvar:"
    -                      " glyph variation data offset %d not monotonic\n",
    +                      " glyph variation data offset %u not monotonic\n",
                           i ));
               blend->glyphoffsets[i] = max_offset;
             }
    @@ -1792,7 +1954,7 @@
             if ( limit < blend->glyphoffsets[i] )
             {
               FT_TRACE2(( "ft_var_load_gvar:"
    -                      " glyph variation data offset %d out of range\n",
    +                      " glyph variation data offset %u out of range\n",
                           i ));
               blend->glyphoffsets[i] = limit;
             }
    @@ -1814,6 +1976,8 @@
             goto Fail;
           }
     
    +      bytes = stream->cursor;
    +
           if ( FT_QNEW_ARRAY( blend->tuplecoords,
                               gvar_head.axisCount * gvar_head.globalCoordCount ) )
             goto Fail2;
    @@ -1824,13 +1988,17 @@
             for ( j = 0; j < (FT_UInt)gvar_head.axisCount; j++ )
             {
               blend->tuplecoords[i * gvar_head.axisCount + j] =
    -            FT_fdot14ToFixed( FT_GET_SHORT() );
    +            FT_fdot14ToFixed( FT_NEXT_SHORT( bytes ) );
               FT_TRACE5(( "%.5f ",
                 (double)blend->tuplecoords[i * gvar_head.axisCount + j] / 65536 ));
             }
             FT_TRACE5(( "]\n" ));
           }
     
    +      if ( FT_NEW_ARRAY( blend->tuplescalars,
    +                         gvar_head.globalCoordCount ) )
    +        goto Fail2;
    +
           blend->tuplecount = gvar_head.globalCoordCount;
     
           FT_TRACE5(( "\n" ));
    @@ -1896,15 +2064,25 @@
     
         for ( i = 0; i < blend->num_axis; i++ )
         {
    -      FT_Fixed  ncv = blend->normalizedcoords[i];
    +      FT_Fixed  ncv;
     
     
    -      FT_TRACE6(( "    axis %d coordinate %.5f:\n", i, (double)ncv / 65536 ));
    +      if ( tuple_coords[i] == 0 )
    +      {
    +        FT_TRACE6(( "      tuple coordinate is zero, ignore\n" ));
    +        continue;
    +      }
     
    -      /* It's not clear why (for intermediate tuples) we don't need     */
    -      /* to check against start/end -- the documentation says we don't. */
    -      /* Similarly, it's unclear why we don't need to scale along the   */
    -      /* axis.                                                          */
    +      ncv = blend->normalizedcoords[i];
    +
    +      FT_TRACE6(( "    axis %u coordinate %.5f:\n", i, (double)ncv / 65536 ));
    +
    +      if ( ncv == 0 )
    +      {
    +        FT_TRACE6(( "      axis coordinate is zero, stop\n" ));
    +        apply = 0;
    +        break;
    +      }
     
           if ( tuple_coords[i] == ncv )
           {
    @@ -1914,12 +2092,6 @@
             continue;
           }
     
    -      if ( tuple_coords[i] == 0 )
    -      {
    -        FT_TRACE6(( "      tuple coordinate is zero, ignore\n" ));
    -        continue;
    -      }
    -
           if ( !( tupleIndex & GX_TI_INTERMEDIATE_TUPLE ) )
           {
             /* not an intermediate tuple */
    @@ -2001,7 +2173,7 @@
         if ( num_coords > mmvar->num_axis )
         {
           FT_TRACE2(( "ft_var_to_normalized:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       mmvar->num_axis, num_coords ));
           num_coords = mmvar->num_axis;
         }
    @@ -2016,7 +2188,7 @@
           FT_Fixed  coord = coords[i];
     
     
    -      FT_TRACE5(( "    %d: %.5f\n", i, (double)coord / 65536 ));
    +      FT_TRACE5(( "    %u: %.5f\n", i, (double)coord / 65536 ));
           if ( coord > a->maximum || coord < a->minimum )
           {
             FT_TRACE1(( "ft_var_to_normalized: design coordinate %.5f\n",
    @@ -2156,7 +2328,7 @@
         if ( num_coords > blend->num_axis )
         {
           FT_TRACE2(( "ft_var_to_design:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       blend->num_axis, num_coords ));
           nc = blend->num_axis;
         }
    @@ -2516,7 +2688,7 @@
                           "    minimum     default     maximum   flags\n" ));
                        /* "  XXXX.XXXXX  XXXX.XXXXX  XXXX.XXXXX  0xXXXX" */
     
    -        FT_TRACE5(( "  %3d  `%s'"
    +        FT_TRACE5(( "  %3u  `%s'"
                         "  %10.5f  %10.5f  %10.5f  0x%04X%s\n",
                         i,
                         a->name,
    @@ -2608,7 +2780,7 @@
     
               (void)FT_STREAM_SEEK( pos );
     
    -          FT_TRACE5(( "  named instance %d (%s%s%s, %s%s%s)\n",
    +          FT_TRACE5(( "  named instance %u (%s%s%s, %s%s%s)\n",
                           i,
                           strname ? "name: `" : "",
                           strname ? strname : "unnamed",
    @@ -2636,7 +2808,7 @@
             FT_UInt  strid = ~0U;
     
     
    -        /* The default instance is missing in array the    */
    +        /* The default instance is missing in the array    */
             /* of named instances; try to synthesize an entry. */
             /* If this fails, `default_named_instance` remains */
             /* at value zero, which doesn't do any harm.       */
    @@ -2766,10 +2938,18 @@
         } manageCvt;
     
     
    -    face->doblend = FALSE;
    -
         if ( !face->blend )
         {
    +      face->doblend = FALSE;
    +      for ( i = 0; i < num_coords; i++ )
    +        if ( coords[i] )
    +        {
    +          face->doblend = TRUE;
    +          break;
    +        }
    +      if ( !face->doblend )
    +        goto Exit;
    +
           if ( FT_SET_ERROR( TT_Get_MM_Var( FT_FACE( face ), NULL ) ) )
             goto Exit;
         }
    @@ -2780,7 +2960,7 @@
         if ( num_coords > mmvar->num_axis )
         {
           FT_TRACE2(( "TT_Set_MM_Blend:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       mmvar->num_axis, num_coords ));
           num_coords = mmvar->num_axis;
         }
    @@ -2882,11 +3062,7 @@
     
           /* return value -1 indicates `no change' */
           if ( !have_diff )
    -      {
    -        face->doblend = TRUE;
    -
             return -1;
    -      }
     
           for ( ; i < mmvar->num_axis; i++ )
           {
    @@ -2915,7 +3091,15 @@
                             blend->normalizedcoords,
                             blend->coords );
     
    -    face->doblend = TRUE;
    +    face->doblend = FALSE;
    +    for ( i = 0; i < blend->num_axis; i++ )
    +    {
    +      if ( blend->normalizedcoords[i] )
    +      {
    +        face->doblend = TRUE;
    +        break;
    +      }
    +    }
     
         if ( face->cvt )
         {
    @@ -2941,6 +3125,9 @@
           }
         }
     
    +    for ( i = 0 ; i < blend->tuplecount ; i++ )
    +      blend->tuplescalars[i] = (FT_Fixed)-0x20000L;
    +
       Exit:
         return error;
       }
    @@ -2980,7 +3167,24 @@
                        FT_UInt    num_coords,
                        FT_Fixed*  coords )
       {
    -    return tt_set_mm_blend( (TT_Face)face, num_coords, coords, 1 );
    +    FT_Error  error;
    +
    +
    +    error = tt_set_mm_blend( (TT_Face)face, num_coords, coords, 1 );
    +    if ( error == FT_Err_Ok )
    +    {
    +      FT_UInt  i;
    +
    +
    +      for ( i = 0; i < num_coords; i++ )
    +        if ( coords[i] )
    +        {
    +          error = -2; /* -2 means is_variable. */
    +          break;
    +        }
    +    }
    +
    +    return error;
       }
     
     
    @@ -3043,7 +3247,7 @@
         if ( num_coords > blend->num_axis )
         {
           FT_TRACE2(( "TT_Get_MM_Blend:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       blend->num_axis, num_coords ));
           nc = blend->num_axis;
         }
    @@ -3125,7 +3329,7 @@
         if ( num_coords > mmvar->num_axis )
         {
           FT_TRACE2(( "TT_Set_Var_Design:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       mmvar->num_axis, num_coords ));
           num_coords = mmvar->num_axis;
         }
    @@ -3201,6 +3405,15 @@
         if ( error )
           goto Exit;
     
    +    for ( i = 0; i < num_coords; i++ )
    +    {
    +      if ( normalized[i] )
    +      {
    +        error = -2; /* -2 means is_variable. */
    +        break;
    +      }
    +    }
    +
       Exit:
         FT_FREE( normalized );
         return error;
    @@ -3237,10 +3450,12 @@
                          FT_UInt    num_coords,
                          FT_Fixed*  coords )
       {
    -    TT_Face   ttface = (TT_Face)face;
    -    FT_Error  error  = FT_Err_Ok;
    -    GX_Blend  blend;
    -    FT_UInt   i, nc;
    +    TT_Face       ttface = (TT_Face)face;
    +    FT_Error      error  = FT_Err_Ok;
    +    GX_Blend      blend;
    +    FT_MM_Var*    mmvar;
    +    FT_Var_Axis*  a;
    +    FT_UInt       i, nc;
     
     
         if ( !ttface->blend )
    @@ -3263,24 +3478,26 @@
         if ( num_coords > blend->num_axis )
         {
           FT_TRACE2(( "TT_Get_Var_Design:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       blend->num_axis, num_coords ));
           nc = blend->num_axis;
         }
     
    +    mmvar = blend->mmvar;
    +    a     = mmvar->axis;
         if ( ttface->doblend )
         {
    -      for ( i = 0; i < nc; i++ )
    +      for ( i = 0; i < nc; i++, a++ )
             coords[i] = blend->coords[i];
         }
         else
         {
    -      for ( i = 0; i < nc; i++ )
    -        coords[i] = 0;
    +      for ( i = 0; i < nc; i++, a++ )
    +        coords[i] = a->def;
         }
     
    -    for ( ; i < num_coords; i++ )
    -      coords[i] = 0;
    +    for ( ; i < num_coords; i++, a++ )
    +      coords[i] = a->def;
     
         return FT_Err_Ok;
       }
    @@ -3373,6 +3590,9 @@
           error = TT_Set_Var_Design( face, 0, NULL );
         }
     
    +    if ( error == -1 || error == -2 )
    +      error = FT_Err_Ok;
    +
       Exit:
         return error;
       }
    @@ -3591,7 +3811,7 @@
           FT_Stream_SeekSet( stream, here );
         }
     
    -    FT_TRACE5(( "cvar: there %s %d tuple%s:\n",
    +    FT_TRACE5(( "cvar: there %s %u tuple%s:\n",
                     ( tupleCount & GX_TC_TUPLE_COUNT_MASK ) == 1 ? "is" : "are",
                     tupleCount & GX_TC_TUPLE_COUNT_MASK,
                     ( tupleCount & GX_TC_TUPLE_COUNT_MASK ) == 1 ? "" : "s" ));
    @@ -3610,7 +3830,7 @@
           FT_Fixed  apply;
     
     
    -      FT_TRACE6(( "  tuple %d:\n", i ));
    +      FT_TRACE6(( "  tuple %u:\n", i ));
     
           tupleDataSize = FT_GET_USHORT();
           tupleIndex    = FT_GET_USHORT();
    @@ -3676,7 +3896,7 @@
           if ( !points || !deltas )
             ; /* failure, ignore it */
     
    -      else if ( localpoints == ALL_POINTS )
    +      else if ( points == ALL_POINTS )
           {
     #ifdef FT_DEBUG_LEVEL_TRACE
             int  count = 0;
    @@ -3697,7 +3917,7 @@
     #ifdef FT_DEBUG_LEVEL_TRACE
               if ( old_cvt_delta != cvt_deltas[j] )
               {
    -            FT_TRACE7(( "      %d: %f -> %f\n",
    +            FT_TRACE7(( "      %u: %f -> %f\n",
                             j,
                             (double)( FT_fdot6ToFixed( face->cvt[j] ) +
                                         old_cvt_delta ) / 65536,
    @@ -4027,7 +4247,7 @@
                                   FT_Outline*  outline,
                                   FT_Vector*   unrounded )
       {
    -    FT_Error   error;
    +    FT_Error   error       = FT_Err_Ok;
         TT_Face    face        = loader->face;
         FT_Stream  stream      = face->root.stream;
         FT_Memory  memory      = stream->memory;
    @@ -4047,6 +4267,15 @@
         FT_ULong  here;
         FT_UInt   i, j;
     
    +    FT_UInt   peak_coords_size;
    +    FT_UInt   point_deltas_x_size;
    +    FT_UInt   points_org_size;
    +    FT_UInt   points_out_size;
    +    FT_UInt   has_delta_size;
    +    FT_UInt   pool_size;
    +    FT_Byte*  pool = NULL;
    +    FT_Byte*  p;
    +
         FT_Fixed*  peak_coords = NULL;
         FT_Fixed*  tuple_coords;
         FT_Fixed*  im_start_coords;
    @@ -4067,21 +4296,24 @@
         FT_Fixed*  point_deltas_y = NULL;
     
     
    -    if ( !face->doblend || !blend )
    -      return FT_THROW( Invalid_Argument );
    -
         for ( i = 0; i < n_points; i++ )
         {
           unrounded[i].x = INT_TO_F26DOT6( outline->points[i].x );
           unrounded[i].y = INT_TO_F26DOT6( outline->points[i].y );
         }
     
    +    if ( !face->doblend  )
    +      goto Exit;
    +
    +    if ( !blend )
    +      return FT_THROW( Invalid_Argument );
    +
         if ( glyph_index >= blend->gv_glyphcnt      ||
              blend->glyphoffsets[glyph_index] ==
                blend->glyphoffsets[glyph_index + 1] )
         {
           FT_TRACE2(( "TT_Vary_Apply_Glyph_Deltas:"
    -                  " no variation data for glyph %d\n", glyph_index ));
    +                  " no variation data for glyph %u\n", glyph_index ));
           return FT_Err_Ok;
         }
     
    @@ -4125,18 +4357,41 @@
           FT_Stream_SeekSet( stream, here );
         }
     
    -    FT_TRACE5(( "gvar: there %s %d tuple%s:\n",
    +    FT_TRACE5(( "gvar: there %s %u tuple%s:\n",
                     ( tupleCount & GX_TC_TUPLE_COUNT_MASK ) == 1 ? "is" : "are",
                     tupleCount & GX_TC_TUPLE_COUNT_MASK,
                     ( tupleCount & GX_TC_TUPLE_COUNT_MASK ) == 1 ? "" : "s" ));
     
    -    if ( FT_QNEW_ARRAY( peak_coords, 3 * blend->num_axis ) ||
    -         FT_NEW_ARRAY( point_deltas_x, 2 * n_points )      ||
    -         FT_QNEW_ARRAY( points_org, n_points )             ||
    -         FT_QNEW_ARRAY( points_out, n_points )             ||
    -         FT_QNEW_ARRAY( has_delta, n_points )              )
    +    peak_coords_size    = ALIGN_SIZE( 3 * blend->num_axis *
    +                                      sizeof ( *peak_coords ) );
    +    point_deltas_x_size = ALIGN_SIZE( 2 * n_points *
    +                                      sizeof ( *point_deltas_x ) );
    +    points_org_size     = ALIGN_SIZE( n_points * sizeof ( *points_org ) );
    +    points_out_size     = ALIGN_SIZE( n_points * sizeof ( *points_out ) );
    +    has_delta_size      = ALIGN_SIZE( n_points * sizeof ( *has_delta ) );
    +
    +    pool_size = peak_coords_size    +
    +                point_deltas_x_size +
    +                points_org_size     +
    +                points_out_size     +
    +                has_delta_size;
    +
    +    if ( FT_ALLOC( pool, pool_size ) )
           goto Exit;
     
    +    p               = pool;
    +    peak_coords     = (FT_Fixed*)p;
    +    p              += peak_coords_size;
    +    point_deltas_x  = (FT_Fixed*)p;
    +    p              += point_deltas_x_size;
    +    points_org      = (FT_Vector*)p;
    +    p              += points_org_size;
    +    points_out      = (FT_Vector*)p;
    +    p              += points_out_size;
    +    has_delta       = (FT_Bool*)p;
    +
    +    FT_ARRAY_ZERO( point_deltas_x, 2 * n_points );
    +
         im_start_coords = peak_coords + blend->num_axis;
         im_end_coords   = im_start_coords + blend->num_axis;
         point_deltas_y  = point_deltas_x + n_points;
    @@ -4147,27 +4402,70 @@
           points_org[j].y = FT_intToFixed( outline->points[j].y );
         }
     
    -    for ( i = 0; i < ( tupleCount & GX_TC_TUPLE_COUNT_MASK ); i++ )
    +    p = stream->cursor;
    +
    +    tupleCount &= GX_TC_TUPLE_COUNT_MASK;
    +    for ( i = 0; i < tupleCount; i++ )
         {
    -      FT_UInt   tupleDataSize;
    -      FT_UInt   tupleIndex;
    -      FT_Fixed  apply;
    +      FT_UInt    tupleDataSize;
    +      FT_UInt    tupleIndex;
    +      FT_Fixed   apply;
    +      FT_Fixed*  tupleScalars;
     
     
    -      FT_TRACE6(( "  tuple %d:\n", i ));
    +      FT_TRACE6(( "  tuple %u:\n", i ));
     
    -      tupleDataSize = FT_GET_USHORT();
    -      tupleIndex    = FT_GET_USHORT();
    +      tupleScalars = blend->tuplescalars;
    +
    +      /* Enter frame for four bytes. */
    +      if ( 4 > stream->limit - p )
    +      {
    +        FT_TRACE2(( "TT_Vary_Apply_Glyph_Deltas:"
    +                    " invalid glyph variation array header\n" ));
    +        error = FT_THROW( Invalid_Table );
    +        goto Exit;
    +      }
    +
    +      tupleDataSize = FT_NEXT_USHORT( p );
    +      tupleIndex    = FT_NEXT_USHORT( p );
    +
    +      if ( tupleIndex & GX_TI_INTERMEDIATE_TUPLE )
    +        tupleScalars = NULL;
     
           if ( tupleIndex & GX_TI_EMBEDDED_TUPLE_COORD )
           {
    +        if ( 2 * blend->num_axis > (FT_UInt)( stream->limit - p ) )
    +        {
    +          FT_TRACE2(( "TT_Vary_Apply_Glyph_Deltas:"
    +                      " invalid glyph variation array header\n" ));
    +          error = FT_THROW( Invalid_Table );
    +          goto Exit;
    +        }
    +
             for ( j = 0; j < blend->num_axis; j++ )
    -          peak_coords[j] = FT_fdot14ToFixed( FT_GET_SHORT() );
    +          peak_coords[j] = FT_fdot14ToFixed( FT_NEXT_SHORT( p ) );
    +
             tuple_coords = peak_coords;
    +        tupleScalars = NULL;
           }
           else if ( ( tupleIndex & GX_TI_TUPLE_INDEX_MASK ) < blend->tuplecount )
    +      {
    +        FT_Fixed  scalar =
    +                    tupleScalars
    +                      ? tupleScalars[tupleIndex & GX_TI_TUPLE_INDEX_MASK]
    +                      : (FT_Fixed)-0x20000;
    +
    +
    +        if ( scalar != (FT_Fixed)-0x20000 )
    +        {
    +          apply = scalar;
    +          goto apply_found;
    +        }
    +
             tuple_coords = blend->tuplecoords +
    -            ( tupleIndex & GX_TI_TUPLE_INDEX_MASK ) * blend->num_axis;
    +                         ( tupleIndex & GX_TI_TUPLE_INDEX_MASK ) *
    +                         blend->num_axis;
    +      }
           else
           {
             FT_TRACE2(( "TT_Vary_Apply_Glyph_Deltas:"
    @@ -4179,10 +4477,18 @@
     
           if ( tupleIndex & GX_TI_INTERMEDIATE_TUPLE )
           {
    +        if ( 4 * blend->num_axis > (FT_UInt)( stream->limit - p ) )
    +        {
    +          FT_TRACE2(( "TT_Vary_Apply_Glyph_Deltas:"
    +                      " invalid glyph variation array header\n" ));
    +          error = FT_THROW( Invalid_Table );
    +          goto Exit;
    +        }
    +
             for ( j = 0; j < blend->num_axis; j++ )
    -          im_start_coords[j] = FT_fdot14ToFixed( FT_GET_SHORT() );
    +          im_start_coords[j] = FT_fdot14ToFixed( FT_NEXT_SHORT( p ) );
             for ( j = 0; j < blend->num_axis; j++ )
    -          im_end_coords[j] = FT_fdot14ToFixed( FT_GET_SHORT() );
    +          im_end_coords[j] = FT_fdot14ToFixed( FT_NEXT_SHORT( p ) );
           }
     
           apply = ft_var_apply_tuple( blend,
    @@ -4191,6 +4497,11 @@
                                       im_start_coords,
                                       im_end_coords );
     
    +      if ( tupleScalars )
    +        tupleScalars[tupleIndex & GX_TI_TUPLE_INDEX_MASK] = apply;
    +
    +    apply_found:
    +
           if ( apply == 0 )              /* tuple isn't active for our blend */
           {
             offsetToData += tupleDataSize;
    @@ -4247,7 +4558,7 @@
     #ifdef FT_DEBUG_LEVEL_TRACE
               if ( point_delta_x || point_delta_y )
               {
    -            FT_TRACE7(( "      %d: (%f, %f) -> (%f, %f)\n",
    +            FT_TRACE7(( "      %u: (%f, %f) -> (%f, %f)\n",
                             j,
                             (double)( FT_intToFixed( outline->points[j].x ) +
                                         old_point_delta_x ) / 65536,
    @@ -4321,7 +4632,7 @@
     #ifdef FT_DEBUG_LEVEL_TRACE
               if ( point_delta_x || point_delta_y )
               {
    -            FT_TRACE7(( "      %d: (%f, %f) -> (%f, %f)\n",
    +            FT_TRACE7(( "      %u: (%f, %f) -> (%f, %f)\n",
                             j,
                             (double)( FT_intToFixed( outline->points[j].x ) +
                                         old_point_delta_x ) / 65536,
    @@ -4402,11 +4713,7 @@
       Exit:
         if ( sharedpoints != ALL_POINTS )
           FT_FREE( sharedpoints );
    -    FT_FREE( points_org );
    -    FT_FREE( points_out );
    -    FT_FREE( has_delta );
    -    FT_FREE( peak_coords );
    -    FT_FREE( point_deltas_x );
    +    FT_FREE( pool );
     
       FExit:
         FT_FRAME_EXIT();
    @@ -4577,6 +4884,7 @@
             FT_FREE( blend->mvar_table );
           }
     
    +      FT_FREE( blend->tuplescalars );
           FT_FREE( blend->tuplecoords );
           FT_FREE( blend->glyphoffsets );
           FT_FREE( blend );
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.h
    index 9326011e3a2..568c8027bbf 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttgxvar.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType GX Font Variation loader (specification)
      *
    - * Copyright (C) 2004-2024 by
    + * Copyright (C) 2004-2025 by
      * David Turner, Robert Wilhelm, Werner Lemberg and George Williams.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -255,6 +255,10 @@ FT_BEGIN_HEADER
        *     A two-dimensional array that holds the shared tuple coordinates
        *     in the `gvar' table.
        *
    +   *   tuplescalars ::
    +   *     A one-dimensional array that holds the shared tuple
    +   *     scalars in the `gvar' table for current face coordinates.
    +   *
        *   gv_glyphcnt ::
        *     The number of glyphs handled in the `gvar' table.
        *
    @@ -293,6 +297,7 @@ FT_BEGIN_HEADER
     
         FT_UInt         tuplecount;
         FT_Fixed*       tuplecoords;      /* tuplecoords[tuplecount][num_axis] */
    +    FT_Fixed*       tuplescalars;     /* tuplescalars[tuplecount]          */
     
         FT_UInt         gv_glyphcnt;
         FT_ULong*       glyphoffsets;         /* glyphoffsets[gv_glyphcnt + 1] */
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.c
    index 951891dbf51..f46cc77fe5f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.c
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType bytecode interpreter (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -27,6 +27,8 @@
     #include 
     #include 
     
    +#ifdef TT_USE_BYTECODE_INTERPRETER
    +
     #include "ttinterp.h"
     #include "tterrors.h"
     #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
    @@ -34,9 +36,6 @@
     #endif
     
     
    -#ifdef TT_USE_BYTECODE_INTERPRETER
    -
    -
       /**************************************************************************
        *
        * The macro FT_COMPONENT is used in trace mode.  It is an implicit
    @@ -89,6 +88,32 @@
     #define FAILURE  1
     
     
    +  /* The default value for `scan_control' is documented as FALSE in the */
    +  /* TrueType specification.  This is confusing since it implies a      */
    +  /* Boolean value.  However, this is not the case, thus both the       */
    +  /* default values of our `scan_type' and `scan_control' fields (which */
    +  /* the documentation's `scan_control' variable is split into) are     */
    +  /* zero.                                                              */
    +  /*                                                                    */
    +  /* The rounding compensation should logically belong here but poorly  */
    +  /* described in the OpenType specs.  It was probably important in the */
    +  /* days of dot matrix printers.  The values are referenced by color   */
    +  /* as Gray, Black, and White in order. The Apple specification says   */
    +  /* that the Gray compensation is always zero.  The fourth value is    */
    +  /* not described at all, but Greg says that it is the same as Gray.   */
    +  /* FreeType sets all compensation values to zero.                     */
    +
    +  const TT_GraphicsState  tt_default_graphics_state =
    +  {
    +    0, 0, 0,  1, 1, 1,
    +    { 0x4000, 0 }, { 0x4000, 0 }, { 0x4000, 0 },
    +    1, 1, { 0, 0, 0, 0 },
    +
    +    64, 68, 0, 0, 9, 3,
    +    TRUE, 0, FALSE, 0
    +  };
    +
    +
       /**************************************************************************
        *
        *                       CODERANGE FUNCTIONS
    @@ -96,53 +121,6 @@
        */
     
     
    -  /**************************************************************************
    -   *
    -   * @Function:
    -   *   TT_Goto_CodeRange
    -   *
    -   * @Description:
    -   *   Switches to a new code range (updates the code related elements in
    -   *   `exec', and `IP').
    -   *
    -   * @Input:
    -   *   range ::
    -   *     The new execution code range.
    -   *
    -   *   IP ::
    -   *     The new IP in the new code range.
    -   *
    -   * @InOut:
    -   *   exec ::
    -   *     The target execution context.
    -   */
    -  FT_LOCAL_DEF( void )
    -  TT_Goto_CodeRange( TT_ExecContext  exec,
    -                     FT_Int          range,
    -                     FT_Long         IP )
    -  {
    -    TT_CodeRange*  coderange;
    -
    -
    -    FT_ASSERT( range >= 1 && range <= 3 );
    -
    -    coderange = &exec->codeRangeTable[range - 1];
    -
    -    FT_ASSERT( coderange->base );
    -
    -    /* NOTE: Because the last instruction of a program may be a CALL */
    -    /*       which will return to the first byte *after* the code    */
    -    /*       range, we test for IP <= Size instead of IP < Size.     */
    -    /*                                                               */
    -    FT_ASSERT( IP <= coderange->size );
    -
    -    exec->code     = coderange->base;
    -    exec->codeSize = coderange->size;
    -    exec->IP       = IP;
    -    exec->curRange = range;
    -  }
    -
    -
       /**************************************************************************
        *
        * @Function:
    @@ -168,13 +146,19 @@
       FT_LOCAL_DEF( void )
       TT_Set_CodeRange( TT_ExecContext  exec,
                         FT_Int          range,
    -                    void*           base,
    +                    FT_Byte*        base,
                         FT_Long         length )
       {
         FT_ASSERT( range >= 1 && range <= 3 );
     
    -    exec->codeRangeTable[range - 1].base = (FT_Byte*)base;
    +    exec->codeRangeTable[range - 1].base = base;
         exec->codeRangeTable[range - 1].size = length;
    +
    +    exec->code     = base;
    +    exec->codeSize = length;
    +    exec->IP       = 0;
    +    exec->curRange = range;
    +    exec->iniRange = range;
       }
     
     
    @@ -224,9 +208,6 @@
        *   exec ::
        *     A handle to the target execution context.
        *
    -   *   memory ::
    -   *     A handle to the parent memory object.
    -   *
        * @Note:
        *   Only the glyph loader and debugger should call this function.
        */
    @@ -240,10 +221,6 @@
         exec->maxPoints   = 0;
         exec->maxContours = 0;
     
    -    /* free stack */
    -    FT_FREE( exec->stack );
    -    exec->stackSize = 0;
    -
         /* free glyf cvt working area */
         FT_FREE( exec->glyfCvt );
         exec->glyfCvtSize = 0;
    @@ -295,79 +272,31 @@
        *
        *   Note that not all members of `TT_ExecContext` get initialized.
        */
    -  FT_LOCAL_DEF( FT_Error )
    +  FT_LOCAL_DEF( void )
       TT_Load_Context( TT_ExecContext  exec,
                        TT_Face         face,
                        TT_Size         size )
       {
    -    FT_Int          i;
    -    TT_MaxProfile*  maxp;
    -    FT_Error        error;
    -    FT_Memory       memory = exec->memory;
    +    FT_Memory  memory = exec->memory;
     
     
         exec->face = face;
    -    maxp       = &face->max_profile;
         exec->size = size;
     
    -    if ( size )
    -    {
    -      exec->numFDefs   = size->num_function_defs;
    -      exec->maxFDefs   = size->max_function_defs;
    -      exec->numIDefs   = size->num_instruction_defs;
    -      exec->maxIDefs   = size->max_instruction_defs;
    -      exec->FDefs      = size->function_defs;
    -      exec->IDefs      = size->instruction_defs;
    -      exec->pointSize  = size->point_size;
    -      exec->tt_metrics = size->ttmetrics;
    -      exec->metrics    = *size->metrics;
    -
    -      exec->maxFunc    = size->max_func;
    -      exec->maxIns     = size->max_ins;
    -
    -      for ( i = 0; i < TT_MAX_CODE_RANGES; i++ )
    -        exec->codeRangeTable[i] = size->codeRangeTable[i];
    -
    -      /* set graphics state */
    -      exec->GS = size->GS;
    -
    -      exec->cvtSize = size->cvt_size;
    -      exec->cvt     = size->cvt;
    -
    -      exec->storeSize = size->storage_size;
    -      exec->storage   = size->storage;
    -
    -      exec->twilight  = size->twilight;
    -
    -      /* In case of multi-threading it can happen that the old size object */
    -      /* no longer exists, thus we must clear all glyph zone references.   */
    -      FT_ZERO( &exec->zp0 );
    -      exec->zp1 = exec->zp0;
    -      exec->zp2 = exec->zp0;
    -    }
    -
    -    /* XXX: We reserve a little more elements on the stack to deal safely */
    -    /*      with broken fonts like arialbs, courbs, timesbs, etc.         */
    -    if ( FT_QRENEW_ARRAY( exec->stack,
    -                          exec->stackSize,
    -                          maxp->maxStackElements + 32 ) )
    -      return error;
    -    exec->stackSize = maxp->maxStackElements + 32;
    +    /* CVT and storage are not persistent in FreeType */
    +    /* reset them after they might have been modified */
    +    exec->storage = exec->stack   + exec->stackSize;
    +    exec->cvt     = exec->storage + exec->storeSize;
     
         /* free previous glyph code range */
         FT_FREE( exec->glyphIns );
         exec->glyphSize = 0;
     
    -    exec->pts.n_points   = 0;
    -    exec->pts.n_contours = 0;
    +    exec->pointSize  = size->point_size;
    +    exec->tt_metrics = size->ttmetrics;
    +    exec->metrics    = *size->metrics;
     
    -    exec->zp1 = exec->pts;
    -    exec->zp2 = exec->pts;
    -    exec->zp0 = exec->pts;
    -
    -    exec->instruction_trap = FALSE;
    -
    -    return FT_Err_Ok;
    +    exec->twilight   = size->twilight;
       }
     
     
    @@ -394,89 +323,22 @@
       TT_Save_Context( TT_ExecContext  exec,
                        TT_Size         size )
       {
    -    FT_Int  i;
    +    /* UNDOCUMENTED!                                            */
    +    /* Only these GS values can be modified by the CVT program. */
     
    -
    -    /* XXX: Will probably disappear soon with all the code range */
    -    /*      management, which is now rather obsolete.            */
    -    /*                                                           */
    -    size->num_function_defs    = exec->numFDefs;
    -    size->num_instruction_defs = exec->numIDefs;
    -
    -    size->max_func = exec->maxFunc;
    -    size->max_ins  = exec->maxIns;
    -
    -    for ( i = 0; i < TT_MAX_CODE_RANGES; i++ )
    -      size->codeRangeTable[i] = exec->codeRangeTable[i];
    +    size->GS.minimum_distance    = exec->GS.minimum_distance;
    +    size->GS.control_value_cutin = exec->GS.control_value_cutin;
    +    size->GS.single_width_cutin  = exec->GS.single_width_cutin;
    +    size->GS.single_width_value  = exec->GS.single_width_value;
    +    size->GS.delta_base          = exec->GS.delta_base;
    +    size->GS.delta_shift         = exec->GS.delta_shift;
    +    size->GS.auto_flip           = exec->GS.auto_flip;
    +    size->GS.instruct_control    = exec->GS.instruct_control;
    +    size->GS.scan_control        = exec->GS.scan_control;
    +    size->GS.scan_type           = exec->GS.scan_type;
       }
     
     
    -  /**************************************************************************
    -   *
    -   * @Function:
    -   *   TT_Run_Context
    -   *
    -   * @Description:
    -   *   Executes one or more instructions in the execution context.
    -   *
    -   * @Input:
    -   *   exec ::
    -   *     A handle to the target execution context.
    -   *
    -   * @Return:
    -   *   TrueType error code.  0 means success.
    -   */
    -  FT_LOCAL_DEF( FT_Error )
    -  TT_Run_Context( TT_ExecContext  exec )
    -  {
    -    TT_Goto_CodeRange( exec, tt_coderange_glyph, 0 );
    -
    -    exec->zp0 = exec->pts;
    -    exec->zp1 = exec->pts;
    -    exec->zp2 = exec->pts;
    -
    -    exec->GS.gep0 = 1;
    -    exec->GS.gep1 = 1;
    -    exec->GS.gep2 = 1;
    -
    -    exec->GS.projVector.x = 0x4000;
    -    exec->GS.projVector.y = 0x0000;
    -
    -    exec->GS.freeVector = exec->GS.projVector;
    -    exec->GS.dualVector = exec->GS.projVector;
    -
    -    exec->GS.round_state = 1;
    -    exec->GS.loop        = 1;
    -
    -    /* some glyphs leave something on the stack. so we clean it */
    -    /* before a new execution.                                  */
    -    exec->top     = 0;
    -    exec->callTop = 0;
    -
    -    return exec->face->interpreter( exec );
    -  }
    -
    -
    -  /* The default value for `scan_control' is documented as FALSE in the */
    -  /* TrueType specification.  This is confusing since it implies a      */
    -  /* Boolean value.  However, this is not the case, thus both the       */
    -  /* default values of our `scan_type' and `scan_control' fields (which */
    -  /* the documentation's `scan_control' variable is split into) are     */
    -  /* zero.                                                              */
    -
    -  const TT_GraphicsState  tt_default_graphics_state =
    -  {
    -    0, 0, 0,
    -    { 0x4000, 0 },
    -    { 0x4000, 0 },
    -    { 0x4000, 0 },
    -
    -    1, 64, 1,
    -    TRUE, 68, 0, 0, 9, 3,
    -    0, FALSE, 0, 1, 1, 1
    -  };
    -
    -
       /* documentation is in ttinterp.h */
     
       FT_EXPORT_DEF( TT_ExecContext )
    @@ -485,7 +347,8 @@
         FT_Memory  memory;
         FT_Error   error;
     
    -    TT_ExecContext  exec = NULL;
    +    TT_ExecContext     exec = NULL;
    +    FT_DebugHook_Func  interp;
     
     
         if ( !driver )
    @@ -497,6 +360,15 @@
         if ( FT_NEW( exec ) )
           goto Fail;
     
    +    /* set `exec->interpreter' according to the debug hook present, */
    +    /* which is used by 'ttdebug'.                                  */
    +    interp = driver->root.root.library->debug_hooks[FT_DEBUG_HOOK_TRUETYPE];
    +
    +    if ( interp )
    +      exec->interpreter = (TT_Interpreter)interp;
    +    else
    +      exec->interpreter = (TT_Interpreter)TT_RunIns;
    +
         /* create callStack here, other allocations delayed */
         exec->memory   = memory;
         exec->callSize = 32;
    @@ -1160,20 +1032,35 @@
     #undef PACK
     
     
    -#ifndef FT_CONFIG_OPTION_NO_ASSEMBLER
    +#ifdef FT_INT64
    +
    +#define TT_MulFix14( a, b )  TT_MulFix14_64( a, b )
    +
    +  static inline FT_F26Dot6
    +  TT_MulFix14_64( FT_F26Dot6  a,
    +                  FT_F2Dot14  b )
    +  {
    +    FT_Int64  ab = MUL_INT64( a, b );
    +
    +
    +    ab = ADD_INT64( ab, 0x2000 + ( ab >> 63 ) );  /* rounding phase */
    +
    +    return (FT_F26Dot6)( ab >> 14 );
    +  }
    +
    +#elif !defined( FT_CONFIG_OPTION_NO_ASSEMBLER )
     
     #if defined( __arm__ )                                 && \
         ( defined( __thumb2__ ) || !defined( __thumb__ ) )
     
     #define TT_MulFix14  TT_MulFix14_arm
     
    -  static FT_Int32
    +  static __inline FT_Int32
       TT_MulFix14_arm( FT_Int32  a,
    -                   FT_Int    b )
    +                   FT_Int32  b )
       {
         FT_Int32  t, t2;
     
    -
     #if defined( __CC_ARM ) || defined( __ARMCC__ )
     
         __asm
    @@ -1199,8 +1086,8 @@
     #endif
           "adds   %1, %1, %0\n\t"           /* %1 += %0 */
           "adc    %2, %2, #0\n\t"           /* %2 += carry */
    -      "mov    %0, %1, lsr #14\n\t"      /* %0  = %1 >> 16 */
    -      "orr    %0, %0, %2, lsl #18\n\t"  /* %0 |= %2 << 16 */
    +      "mov    %0, %1, lsr #14\n\t"      /* %0  = %1 >> 14 */
    +      "orr    %0, %0, %2, lsl #18\n\t"  /* %0 |= %2 << 18 */
           : "=r"(a), "=&r"(t2), "=&r"(t)
           : "r"(a), "r"(b)
           : "cc" );
    @@ -1210,49 +1097,60 @@
         return a;
       }
     
    -#endif /* __arm__ && ( __thumb2__ || !__thumb__ ) */
    +#elif defined( __i386__ ) || defined( _M_IX86 )
     
    -#endif /* !FT_CONFIG_OPTION_NO_ASSEMBLER */
    +#define TT_MulFix14  TT_MulFix14_i386
     
    +  /* documentation is in freetype.h */
     
    -#if defined( __GNUC__ )                              && \
    -    ( defined( __i386__ ) || defined( __x86_64__ ) )
    -
    -#define TT_MulFix14  TT_MulFix14_long_long
    -
    -  /* Temporarily disable the warning that C90 doesn't support `long long'. */
    -#if ( __GNUC__ * 100 + __GNUC_MINOR__ ) >= 406
    -#pragma GCC diagnostic push
    -#endif
    -#pragma GCC diagnostic ignored "-Wlong-long"
    -
    -  /* This is declared `noinline' because inlining the function results */
    -  /* in slower code.  The `pure' attribute indicates that the result   */
    -  /* only depends on the parameters.                                   */
    -  static __attribute__(( noinline ))
    -         __attribute__(( pure )) FT_Int32
    -  TT_MulFix14_long_long( FT_Int32  a,
    -                         FT_Int    b )
    +  static __inline FT_Int32
    +  TT_MulFixi14_i386( FT_Int32  a,
    +                     FT_Int32  b )
       {
    +    FT_Int32  result;
     
    -    long long  ret = (long long)a * b;
    +#if defined( __GNUC__ )
     
    -    /* The following line assumes that right shifting of signed values */
    -    /* will actually preserve the sign bit.  The exact behaviour is    */
    -    /* undefined, but this is true on x86 and x86_64.                  */
    -    long long  tmp = ret >> 63;
    +    __asm__ __volatile__ (
    +      "imul  %%edx\n"
    +      "movl  %%edx, %%ecx\n"
    +      "sarl  $31, %%ecx\n"
    +      "addl  $0x2000, %%ecx\n"
    +      "addl  %%ecx, %%eax\n"
    +      "adcl  $0, %%edx\n"
    +      "shrl  $14, %%eax\n"
    +      "shll  $18, %%edx\n"
    +      "addl  %%edx, %%eax\n"
    +      : "=a"(result), "=d"(b)
    +      : "a"(a), "d"(b)
    +      : "%ecx", "cc" );
     
    +#elif defined( _MSC_VER)
     
    -    ret += 0x2000 + tmp;
    +    __asm
    +    {
    +      mov eax, a
    +      mov edx, b
    +      imul edx
    +      mov ecx, edx
    +      sar ecx, 31
    +      add ecx, 2000h
    +      add eax, ecx
    +      adc edx, 0
    +      shr eax, 14
    +      shl edx, 18
    +      add eax, edx
    +      mov result, eax
    +    }
     
    -    return (FT_Int32)( ret >> 14 );
    +#endif
    +
    +    return result;
       }
     
    -#if ( __GNUC__ * 100 + __GNUC_MINOR__ ) >= 406
    -#pragma GCC diagnostic pop
    -#endif
    +#endif /* __i386__ || _M_IX86 */
     
    -#endif /* __GNUC__ && ( __i386__ || __x86_64__ ) */
    +#endif /* !FT_CONFIG_OPTION_NO_ASSEMBLER */
     
     
     #ifndef TT_MulFix14
    @@ -1262,92 +1160,59 @@
       /* for platforms where sizeof(int) == 2.                   */
       static FT_Int32
       TT_MulFix14( FT_Int32  a,
    -               FT_Int    b )
    +               FT_Int16  b )
       {
    -    FT_Int32   sign;
    -    FT_UInt32  ah, al, mid, lo, hi;
    +    FT_Int32   m, hi;
    +    FT_UInt32  l, lo;
     
     
    -    sign = a ^ b;
    +    /* compute a*b as 64-bit (hi_lo) value */
    +    l = (FT_UInt32)( ( a & 0xFFFFU ) * b );
    +    m = ( a >> 16 ) * b;
     
    -    if ( a < 0 )
    -      a = -a;
    -    if ( b < 0 )
    -      b = -b;
    +    lo = l + ( (FT_UInt32)m << 16 );
    +    hi = ( m >> 16 ) + ( (FT_Int32)l >> 31 ) + ( lo < l );
     
    -    ah = (FT_UInt32)( ( a >> 16 ) & 0xFFFFU );
    -    al = (FT_UInt32)( a & 0xFFFFU );
    +    /* divide the result by 2^14 with rounding */
    +    l   = lo + 0x2000U + (FT_UInt32)( hi >> 31 );  /* rounding phase */
    +    hi += ( l < lo );
     
    -    lo    = al * b;
    -    mid   = ah * b;
    -    hi    = mid >> 16;
    -    mid   = ( mid << 16 ) + ( 1 << 13 ); /* rounding */
    -    lo   += mid;
    -    if ( lo < mid )
    -      hi += 1;
    -
    -    mid = ( lo >> 14 ) | ( hi << 18 );
    -
    -    return sign >= 0 ? (FT_Int32)mid : -(FT_Int32)mid;
    +    return (FT_F26Dot6)( ( (FT_UInt32)hi << 18 ) | ( l >> 14 ) );
       }
     
     #endif  /* !TT_MulFix14 */
     
     
    -#if defined( __GNUC__ )        && \
    -    ( defined( __i386__ )   ||    \
    -      defined( __x86_64__ ) ||    \
    -      defined( __arm__ )    )
    -
    -#define TT_DotFix14  TT_DotFix14_long_long
    -
    -#if ( __GNUC__ * 100 + __GNUC_MINOR__ ) >= 406
    -#pragma GCC diagnostic push
    -#endif
    -#pragma GCC diagnostic ignored "-Wlong-long"
    -
    -  static __attribute__(( pure )) FT_Int32
    -  TT_DotFix14_long_long( FT_Int32  ax,
    -                         FT_Int32  ay,
    -                         FT_Int    bx,
    -                         FT_Int    by )
    -  {
    -    /* Temporarily disable the warning that C90 doesn't support */
    -    /* `long long'.                                             */
    -
    -    long long  temp1 = (long long)ax * bx;
    -    long long  temp2 = (long long)ay * by;
    -
    -
    -    temp1 += temp2;
    -    temp2  = temp1 >> 63;
    -    temp1 += 0x2000 + temp2;
    -
    -    return (FT_Int32)( temp1 >> 14 );
    -
    -  }
    -
    -#if ( __GNUC__ * 100 + __GNUC_MINOR__ ) >= 406
    -#pragma GCC diagnostic pop
    -#endif
    -
    -#endif /* __GNUC__ && (__arm__ || __i386__ || __x86_64__) */
    -
    -
    -#ifndef TT_DotFix14
    +#ifdef FT_INT64
     
       /* compute (ax*bx+ay*by)/2^14 with maximum accuracy and rounding */
    -  static FT_Int32
    -  TT_DotFix14( FT_Int32  ax,
    -               FT_Int32  ay,
    -               FT_Int    bx,
    -               FT_Int    by )
    +  static inline FT_F26Dot6
    +  TT_DotFix14( FT_F26Dot6  ax,
    +               FT_F26Dot6  ay,
    +               FT_F2Dot14  bx,
    +               FT_F2Dot14  by )
       {
    -    FT_Int32   m, s, hi1, hi2, hi;
    +    FT_Int64  c = ADD_INT64( MUL_INT64( ax, bx ), MUL_INT64( ay, by ) );
    +
    +
    +    c = ADD_INT64( c, 0x2000 + ( c >> 63 ) );  /* rounding phase */
    +
    +    return (FT_F26Dot6)( c >> 14 );
    +  }
    +
    +#else
    +
    +  static inline FT_F26Dot6
    +  TT_DotFix14( FT_F26Dot6  ax,
    +               FT_F26Dot6  ay,
    +               FT_F2Dot14  bx,
    +               FT_F2Dot14  by )
    +  {
    +    FT_Int32   m, hi1, hi2, hi;
         FT_UInt32  l, lo1, lo2, lo;
     
     
    -    /* compute ax*bx as 64-bit value */
    +    /* compute ax*bx as 64-bit (hi_lo) value */
         l = (FT_UInt32)( ( ax & 0xFFFFU ) * bx );
         m = ( ax >> 16 ) * bx;
     
    @@ -1366,18 +1231,13 @@
         hi = hi1 + hi2 + ( lo < lo1 );
     
         /* divide the result by 2^14 with rounding */
    -    s   = hi >> 31;
    -    l   = lo + (FT_UInt32)s;
    -    hi += s + ( l < lo );
    -    lo  = l;
    -
    -    l   = lo + 0x2000U;
    +    l   = lo + 0x2000U + (FT_UInt32)( hi >> 31 );  /* rounding phase */
         hi += ( l < lo );
     
    -    return (FT_Int32)( ( (FT_UInt32)hi << 18 ) | ( l >> 14 ) );
    +    return (FT_F26Dot6)( ( (FT_UInt32)hi << 18 ) | ( l >> 14 ) );
       }
     
    -#endif /* TT_DotFix14 */
    +#endif /* !FT_INT64 */
     
     
       /**************************************************************************
    @@ -1531,31 +1391,6 @@
       }
     
     
    -  /**************************************************************************
    -   *
    -   * @Function:
    -   *   GetShortIns
    -   *
    -   * @Description:
    -   *   Returns a short integer taken from the instruction stream at
    -   *   address IP.
    -   *
    -   * @Return:
    -   *   Short read at code[IP].
    -   *
    -   * @Note:
    -   *   This one could become a macro.
    -   */
    -  static FT_Short
    -  GetShortIns( TT_ExecContext  exc )
    -  {
    -    /* Reading a byte stream so there is no endianness (DaveP) */
    -    exc->IP += 2;
    -    return (FT_Short)( ( exc->code[exc->IP - 2] << 8 ) +
    -                         exc->code[exc->IP - 1]      );
    -  }
    -
    -
       /**************************************************************************
        *
        * @Function:
    @@ -1609,6 +1444,7 @@
         exc->code     = range->base;
         exc->codeSize = range->size;
         exc->IP       = aIP;
    +    exc->length   = 0;
         exc->curRange = aRange;
     
         return SUCCESS;
    @@ -1671,48 +1507,33 @@
                    FT_UShort       point,
                    FT_F26Dot6      distance )
       {
    -    FT_F26Dot6  v;
    +    FT_Fixed  v;
     
     
    -    v = exc->GS.freeVector.x;
    -
    +    v = exc->moveVector.x;
         if ( v != 0 )
         {
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
           /* Exception to the post-IUP curfew: Allow the x component of */
           /* diagonal moves, but only post-IUP.  DejaVu tries to adjust */
           /* diagonal stems like on `Z' and `z' post-IUP.               */
    -      if ( SUBPIXEL_HINTING_MINIMAL && !exc->backward_compatibility )
    -        zone->cur[point].x = ADD_LONG( zone->cur[point].x,
    -                                       FT_MulDiv( distance,
    -                                                  v,
    -                                                  exc->F_dot_P ) );
    -      else
    +      if ( !exc->backward_compatibility )
     #endif
    -
    -      if ( NO_SUBPIXEL_HINTING )
             zone->cur[point].x = ADD_LONG( zone->cur[point].x,
    -                                       FT_MulDiv( distance,
    -                                                  v,
    -                                                  exc->F_dot_P ) );
    +                                       FT_MulFix( distance, v ) );
     
           zone->tags[point] |= FT_CURVE_TAG_TOUCH_X;
         }
     
    -    v = exc->GS.freeVector.y;
    -
    +    v = exc->moveVector.y;
         if ( v != 0 )
         {
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -      if ( !( SUBPIXEL_HINTING_MINIMAL    &&
    -              exc->backward_compatibility &&
    -              exc->iupx_called            &&
    -              exc->iupy_called            ) )
    +      /* See `ttinterp.h' for details on backward compatibility mode. */
    +      if ( exc->backward_compatibility != 0x7 )
     #endif
             zone->cur[point].y = ADD_LONG( zone->cur[point].y,
    -                                       FT_MulDiv( distance,
    -                                                  v,
    -                                                  exc->F_dot_P ) );
    +                                       FT_MulFix( distance, v ) );
     
           zone->tags[point] |= FT_CURVE_TAG_TOUCH_Y;
         }
    @@ -1745,24 +1566,20 @@
                         FT_UShort       point,
                         FT_F26Dot6      distance )
       {
    -    FT_F26Dot6  v;
    +    FT_Fixed  v;
     
     
    -    v = exc->GS.freeVector.x;
    +    v = exc->moveVector.x;
     
         if ( v != 0 )
           zone->org[point].x = ADD_LONG( zone->org[point].x,
    -                                     FT_MulDiv( distance,
    -                                                v,
    -                                                exc->F_dot_P ) );
    +                                     FT_MulFix( distance, v ) );
     
    -    v = exc->GS.freeVector.y;
    +    v = exc->moveVector.y;
     
         if ( v != 0 )
           zone->org[point].y = ADD_LONG( zone->org[point].y,
    -                                     FT_MulDiv( distance,
    -                                                v,
    -                                                exc->F_dot_P ) );
    +                                     FT_MulFix( distance, v ) );
       }
     
     
    @@ -1784,12 +1601,8 @@
                      FT_F26Dot6      distance )
       {
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    if ( SUBPIXEL_HINTING_MINIMAL && !exc->backward_compatibility )
    -      zone->cur[point].x = ADD_LONG( zone->cur[point].x, distance );
    -    else
    +    if ( !exc->backward_compatibility )
     #endif
    -
    -    if ( NO_SUBPIXEL_HINTING )
           zone->cur[point].x = ADD_LONG( zone->cur[point].x, distance );
     
         zone->tags[point]  |= FT_CURVE_TAG_TOUCH_X;
    @@ -1805,9 +1618,8 @@
         FT_UNUSED( exc );
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    if ( !( SUBPIXEL_HINTING_MINIMAL             &&
    -            exc->backward_compatibility          &&
    -            exc->iupx_called && exc->iupy_called ) )
    +    /* See `ttinterp.h' for details on backward compatibility mode. */
    +    if ( exc->backward_compatibility != 0x7 )
     #endif
           zone->cur[point].y = ADD_LONG( zone->cur[point].y, distance );
     
    @@ -1860,8 +1672,8 @@
        *   distance ::
        *     The distance (not) to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   The compensated distance.
    @@ -1869,10 +1681,10 @@
       static FT_F26Dot6
       Round_None( TT_ExecContext  exc,
                   FT_F26Dot6      distance,
    -              FT_Int          color )
    +              FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
    +    FT_UNUSED( exc );
     
     
         if ( distance >= 0 )
    @@ -1903,8 +1715,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -1912,10 +1724,10 @@
       static FT_F26Dot6
       Round_To_Grid( TT_ExecContext  exc,
                      FT_F26Dot6      distance,
    -                 FT_Int          color )
    +                 FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
    +    FT_UNUSED( exc );
     
     
         if ( distance >= 0 )
    @@ -1948,8 +1760,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -1957,10 +1769,10 @@
       static FT_F26Dot6
       Round_To_Half_Grid( TT_ExecContext  exc,
                           FT_F26Dot6      distance,
    -                      FT_Int          color )
    +                      FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
    +    FT_UNUSED( exc );
     
     
         if ( distance >= 0 )
    @@ -1995,8 +1807,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -2004,10 +1816,10 @@
       static FT_F26Dot6
       Round_Down_To_Grid( TT_ExecContext  exc,
                           FT_F26Dot6      distance,
    -                      FT_Int          color )
    +                      FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
    +    FT_UNUSED( exc );
     
     
         if ( distance >= 0 )
    @@ -2039,8 +1851,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -2048,10 +1860,10 @@
       static FT_F26Dot6
       Round_Up_To_Grid( TT_ExecContext  exc,
                         FT_F26Dot6      distance,
    -                    FT_Int          color )
    +                    FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
    +    FT_UNUSED( exc );
     
     
         if ( distance >= 0 )
    @@ -2084,8 +1896,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -2093,10 +1905,10 @@
       static FT_F26Dot6
       Round_To_Double_Grid( TT_ExecContext  exc,
                             FT_F26Dot6      distance,
    -                        FT_Int          color )
    +                        FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
    +    FT_UNUSED( exc );
     
     
         if ( distance >= 0 )
    @@ -2129,8 +1941,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -2144,9 +1956,8 @@
       static FT_F26Dot6
       Round_Super( TT_ExecContext  exc,
                    FT_F26Dot6      distance,
    -               FT_Int          color )
    +               FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
     
     
    @@ -2185,8 +1996,8 @@
        *   distance ::
        *     The distance to round.
        *
    -   *   color ::
    -   *     The engine compensation color.
    +   *   compensation ::
    +   *     The engine compensation.
        *
        * @Return:
        *   Rounded distance.
    @@ -2198,9 +2009,8 @@
       static FT_F26Dot6
       Round_Super_45( TT_ExecContext  exc,
                       FT_F26Dot6      distance,
    -                  FT_Int          color )
    +                  FT_F26Dot6      compensation )
       {
    -    FT_F26Dot6  compensation = exc->tt_metrics.compensations[color];
         FT_F26Dot6  val;
     
     
    @@ -2227,59 +2037,6 @@
       }
     
     
    -  /**************************************************************************
    -   *
    -   * @Function:
    -   *   Compute_Round
    -   *
    -   * @Description:
    -   *   Sets the rounding mode.
    -   *
    -   * @Input:
    -   *   round_mode ::
    -   *     The rounding mode to be used.
    -   */
    -  static void
    -  Compute_Round( TT_ExecContext  exc,
    -                 FT_Byte         round_mode )
    -  {
    -    switch ( round_mode )
    -    {
    -    case TT_Round_Off:
    -      exc->func_round = (TT_Round_Func)Round_None;
    -      break;
    -
    -    case TT_Round_To_Grid:
    -      exc->func_round = (TT_Round_Func)Round_To_Grid;
    -      break;
    -
    -    case TT_Round_Up_To_Grid:
    -      exc->func_round = (TT_Round_Func)Round_Up_To_Grid;
    -      break;
    -
    -    case TT_Round_Down_To_Grid:
    -      exc->func_round = (TT_Round_Func)Round_Down_To_Grid;
    -      break;
    -
    -    case TT_Round_To_Half_Grid:
    -      exc->func_round = (TT_Round_Func)Round_To_Half_Grid;
    -      break;
    -
    -    case TT_Round_To_Double_Grid:
    -      exc->func_round = (TT_Round_Func)Round_To_Double_Grid;
    -      break;
    -
    -    case TT_Round_Super:
    -      exc->func_round = (TT_Round_Func)Round_Super;
    -      break;
    -
    -    case TT_Round_Super_45:
    -      exc->func_round = (TT_Round_Func)Round_Super_45;
    -      break;
    -    }
    -  }
    -
    -
       /**************************************************************************
        *
        * @Function:
    @@ -2481,14 +2238,45 @@
       static void
       Compute_Funcs( TT_ExecContext  exc )
       {
    -    if ( exc->GS.freeVector.x == 0x4000 )
    -      exc->F_dot_P = exc->GS.projVector.x;
    -    else if ( exc->GS.freeVector.y == 0x4000 )
    -      exc->F_dot_P = exc->GS.projVector.y;
    +    FT_Long  F_dot_P =
    +             ( (FT_Long)exc->GS.projVector.x * exc->GS.freeVector.x +
    +               (FT_Long)exc->GS.projVector.y * exc->GS.freeVector.y +
    +               0x2000L ) >> 14;
    +
    +
    +    if ( F_dot_P >= 0x3FFEL )
    +    {
    +      /* commonly collinear */
    +      exc->moveVector.x = exc->GS.freeVector.x * 4;
    +      exc->moveVector.y = exc->GS.freeVector.y * 4;
    +    }
    +    else if ( -0x400L < F_dot_P && F_dot_P < 0x400L )
    +    {
    +      /* prohibitively orthogonal */
    +      exc->moveVector.x = 0;
    +      exc->moveVector.y = 0;
    +    }
         else
    -      exc->F_dot_P =
    -        ( (FT_Long)exc->GS.projVector.x * exc->GS.freeVector.x +
    -          (FT_Long)exc->GS.projVector.y * exc->GS.freeVector.y ) >> 14;
    +    {
    +      exc->moveVector.x = exc->GS.freeVector.x * 0x10000L / F_dot_P;
    +      exc->moveVector.y = exc->GS.freeVector.y * 0x10000L / F_dot_P;
    +    }
    +
    +    if ( F_dot_P >= 0x3FFEL && exc->GS.freeVector.x == 0x4000 )
    +    {
    +      exc->func_move      = (TT_Move_Func)Direct_Move_X;
    +      exc->func_move_orig = (TT_Move_Func)Direct_Move_Orig_X;
    +    }
    +    else if ( F_dot_P >= 0x3FFEL && exc->GS.freeVector.y == 0x4000 )
    +    {
    +      exc->func_move      = (TT_Move_Func)Direct_Move_Y;
    +      exc->func_move_orig = (TT_Move_Func)Direct_Move_Orig_Y;
    +    }
    +    else
    +    {
    +      exc->func_move      = (TT_Move_Func)Direct_Move;
    +      exc->func_move_orig = (TT_Move_Func)Direct_Move_Orig;
    +    }
     
         if ( exc->GS.projVector.x == 0x4000 )
           exc->func_project = (TT_Project_Func)Project_x;
    @@ -2504,29 +2292,6 @@
         else
           exc->func_dualproj = (TT_Project_Func)Dual_Project;
     
    -    exc->func_move      = (TT_Move_Func)Direct_Move;
    -    exc->func_move_orig = (TT_Move_Func)Direct_Move_Orig;
    -
    -    if ( exc->F_dot_P == 0x4000L )
    -    {
    -      if ( exc->GS.freeVector.x == 0x4000 )
    -      {
    -        exc->func_move      = (TT_Move_Func)Direct_Move_X;
    -        exc->func_move_orig = (TT_Move_Func)Direct_Move_Orig_X;
    -      }
    -      else if ( exc->GS.freeVector.y == 0x4000 )
    -      {
    -        exc->func_move      = (TT_Move_Func)Direct_Move_Y;
    -        exc->func_move_orig = (TT_Move_Func)Direct_Move_Orig_Y;
    -      }
    -    }
    -
    -    /* at small sizes, F_dot_P can become too small, resulting   */
    -    /* in overflows and `spikes' in a number of glyphs like `w'. */
    -
    -    if ( FT_ABS( exc->F_dot_P ) < 0x400L )
    -      exc->F_dot_P = 0x4000L;
    -
         /* Disable cached aspect ratio */
         exc->tt_metrics.ratio = 0;
       }
    @@ -2799,7 +2564,7 @@
       Ins_ODD( TT_ExecContext  exc,
                FT_Long*        args )
       {
    -    args[0] = ( ( exc->func_round( exc, args[0], 3 ) & 127 ) == 64 );
    +    args[0] = ( ( exc->func_round( exc, args[0], 0 ) & 64 ) == 64 );
       }
     
     
    @@ -2813,7 +2578,7 @@
       Ins_EVEN( TT_ExecContext  exc,
                 FT_Long*        args )
       {
    -    args[0] = ( ( exc->func_round( exc, args[0], 3 ) & 127 ) == 0 );
    +    args[0] = ( ( exc->func_round( exc, args[0], 0 ) & 64 ) == 0 );
       }
     
     
    @@ -3020,7 +2785,7 @@
             FT_MEM_QRENEW_ARRAY( exc->glyfStorage,
                                  exc->glyfStoreSize,
                                  exc->storeSize );
    -        exc->error  = error;
    +        exc->error = error;
             if ( error )
               return;
     
    @@ -3143,7 +2908,8 @@
       Ins_ROUND( TT_ExecContext  exc,
                  FT_Long*        args )
       {
    -    args[0] = exc->func_round( exc, args[0], exc->opcode & 3 );
    +    args[0] = exc->func_round( exc, args[0],
    +                               exc->GS.compensation[exc->opcode & 3] );
       }
     
     
    @@ -3157,7 +2923,8 @@
       Ins_NROUND( TT_ExecContext  exc,
                   FT_Long*        args )
       {
    -    args[0] = Round_None( exc, args[0], exc->opcode & 3 );
    +    args[0] = Round_None( exc, args[0],
    +                          exc->GS.compensation[exc->opcode & 3] );
       }
     
     
    @@ -3211,13 +2978,11 @@
         }
         else
         {
    -      K = exc->stack[exc->args - L];
    +      K = args[-L];
     
    -      FT_ARRAY_MOVE( &exc->stack[exc->args - L    ],
    -                     &exc->stack[exc->args - L + 1],
    -                     ( L - 1 ) );
    +      FT_ARRAY_MOVE( args - L, args - L + 1, L - 1 );
     
    -      exc->stack[exc->args - 1] = K;
    +      args[-1] = K;
         }
       }
     
    @@ -3244,7 +3009,7 @@
           args[0] = 0;
         }
         else
    -      args[0] = exc->stack[exc->args - L];
    +      args[0] = args[-L];
       }
     
     
    @@ -3314,8 +3079,7 @@
             exc->length = 2 - exc->length * exc->code[exc->IP + 1];
           }
     
    -      if ( exc->IP + exc->length <= exc->codeSize )
    -        return SUCCESS;
    +      return SUCCESS;
         }
     
       Fail_Overflow:
    @@ -3363,6 +3127,9 @@
             nIfs--;
             Out = FT_BOOL( nIfs == 0 );
             break;
    +
    +      default:
    +        break;
           }
         } while ( Out == 0 );
       }
    @@ -3396,6 +3163,9 @@
           case 0x59:    /* EIF */
             nIfs--;
             break;
    +
    +      default:
    +        break;
           }
         } while ( nIfs != 0 );
       }
    @@ -3439,7 +3209,7 @@
           return;
         }
     
    -    exc->step_ins = FALSE;
    +    exc->length = 0;
     
         if ( args[0] < 0 )
         {
    @@ -3540,10 +3310,10 @@
           return;
         }
     
    -    rec->range          = exc->curRange;
    -    rec->opc            = (FT_UInt16)n;
    -    rec->start          = exc->IP + 1;
    -    rec->active         = TRUE;
    +    rec->range  = exc->curRange;
    +    rec->opc    = (FT_UInt16)n;
    +    rec->start  = exc->IP + 1;
    +    rec->active = TRUE;
     
         if ( n > exc->maxFunc )
           exc->maxFunc = (FT_UInt16)n;
    @@ -3555,14 +3325,17 @@
         {
           switch ( exc->opcode )
           {
    -      case 0x89:    /* IDEF */
    -      case 0x2C:    /* FDEF */
    +      case 0x89:   /* IDEF */
    +      case 0x2C:   /* FDEF */
             exc->error = FT_THROW( Nested_DEFS );
             return;
     
           case 0x2D:   /* ENDF */
             rec->end = exc->IP;
             return;
    +
    +      default:
    +        break;
           }
         }
       }
    @@ -3592,12 +3365,11 @@
     
         pRec->Cur_Count--;
     
    -    exc->step_ins = FALSE;
    -
         if ( pRec->Cur_Count > 0 )
         {
           exc->callTop++;
    -      exc->IP = pRec->Def->start;
    +      exc->IP     = pRec->Def->start;
    +      exc->length = 0;
         }
         else
           /* Loop through the current function */
    @@ -3685,8 +3457,6 @@
     
         Ins_Goto_CodeRange( exc, def->range, def->start );
     
    -    exc->step_ins = FALSE;
    -
         return;
     
       Fail:
    @@ -3764,8 +3534,6 @@
     
           Ins_Goto_CodeRange( exc, def->range, def->start );
     
    -      exc->step_ins = FALSE;
    -
           exc->loopcall_counter += (FT_ULong)args[0];
           if ( exc->loopcall_counter > exc->loopcall_counter_max )
             exc->error = FT_THROW( Execution_Too_Long );
    @@ -3845,9 +3613,13 @@
           case 0x2C:   /* FDEF */
             exc->error = FT_THROW( Nested_DEFS );
             return;
    +
           case 0x2D:   /* ENDF */
             def->end = exc->IP;
             return;
    +
    +      default:
    +        break;
           }
         }
       }
    @@ -3870,10 +3642,23 @@
       Ins_NPUSHB( TT_ExecContext  exc,
                   FT_Long*        args )
       {
    -    FT_UShort  L, K;
    +    FT_Long  IP = exc->IP;
    +    FT_Int   L, K;
     
     
    -    L = (FT_UShort)exc->code[exc->IP + 1];
    +    if ( ++IP >= exc->codeSize )
    +    {
    +      exc->error = FT_THROW( Code_Overflow );
    +      return;
    +    }
    +
    +    L = exc->code[IP];
    +
    +    if ( IP + L >= exc->codeSize )
    +    {
    +      exc->error = FT_THROW( Code_Overflow );
    +      return;
    +    }
     
         if ( BOUNDS( L, exc->stackSize + 1 - exc->top ) )
         {
    @@ -3881,10 +3666,11 @@
           return;
         }
     
    -    for ( K = 1; K <= L; K++ )
    -      args[K - 1] = exc->code[exc->IP + K + 1];
    +    for ( K = 0; K < L; K++ )
    +      args[K] = exc->code[++IP];
     
         exc->new_top += L;
    +    exc->IP       = IP;
       }
     
     
    @@ -3898,10 +3684,23 @@
       Ins_NPUSHW( TT_ExecContext  exc,
                   FT_Long*        args )
       {
    -    FT_UShort  L, K;
    +    FT_Long  IP = exc->IP;
    +    FT_Int   L, K;
     
     
    -    L = (FT_UShort)exc->code[exc->IP + 1];
    +    if ( ++IP >= exc->codeSize )
    +    {
    +      exc->error = FT_THROW( Code_Overflow );
    +      return;
    +    }
    +
    +    L = exc->code[IP];
    +
    +    if ( IP + 2 * L >= exc->codeSize )
    +    {
    +      exc->error = FT_THROW( Code_Overflow );
    +      return;
    +    }
     
         if ( BOUNDS( L, exc->stackSize + 1 - exc->top ) )
         {
    @@ -3909,13 +3708,12 @@
           return;
         }
     
    -    exc->IP += 2;
    +    /* note casting for sign-extension */
    +    for ( K = 0; K < L; K++, IP += 2 )
    +      args[K] = (FT_Short)( exc->code[IP + 1] << 8 ) | exc->code[IP + 2];
     
    -    for ( K = 0; K < L; K++ )
    -      args[K] = GetShortIns( exc );
    -
    -    exc->step_ins = FALSE;
         exc->new_top += L;
    +    exc->IP       = IP;
       }
     
     
    @@ -3929,10 +3727,17 @@
       Ins_PUSHB( TT_ExecContext  exc,
                  FT_Long*        args )
       {
    -    FT_UShort  L, K;
    +    FT_Long  IP = exc->IP;
    +    FT_Int   L, K;
     
     
    -    L = (FT_UShort)( exc->opcode - 0xB0 + 1 );
    +    L = exc->opcode - 0xB0 + 1;
    +
    +    if ( IP + L >= exc->codeSize )
    +    {
    +      exc->error = FT_THROW( Code_Overflow );
    +      return;
    +    }
     
         if ( BOUNDS( L, exc->stackSize + 1 - exc->top ) )
         {
    @@ -3940,8 +3745,10 @@
           return;
         }
     
    -    for ( K = 1; K <= L; K++ )
    -      args[K - 1] = exc->code[exc->IP + K];
    +    for ( K = 0; K < L; K++ )
    +      args[K] = exc->code[++IP];
    +
    +    exc->IP = IP;
       }
     
     
    @@ -3955,10 +3762,17 @@
       Ins_PUSHW( TT_ExecContext  exc,
                  FT_Long*        args )
       {
    -    FT_UShort  L, K;
    +    FT_Long  IP = exc->IP;
    +    FT_Int   L, K;
     
     
    -    L = (FT_UShort)( exc->opcode - 0xB8 + 1 );
    +    L = exc->opcode - 0xB8 + 1;
    +
    +    if ( IP + 2 * L >= exc->codeSize )
    +    {
    +      exc->error = FT_THROW( Code_Overflow );
    +      return;
    +    }
     
         if ( BOUNDS( L, exc->stackSize + 1 - exc->top ) )
         {
    @@ -3966,12 +3780,11 @@
           return;
         }
     
    -    exc->IP++;
    +    /* note casting for sign-extension */
    +    for ( K = 0; K < L; K++, IP += 2 )
    +      args[K] = (FT_Short)( exc->code[IP + 1] << 8 ) | exc->code[IP + 2];
     
    -    for ( K = 0; K < L; K++ )
    -      args[K] = GetShortIns( exc );
    -
    -    exc->step_ins = FALSE;
    +    exc->IP = IP;
       }
     
     
    @@ -4142,15 +3955,12 @@
       Ins_SPVFS( TT_ExecContext  exc,
                  FT_Long*        args )
       {
    -    FT_Short  S;
         FT_Long   X, Y;
     
     
         /* Only use low 16bits, then sign extend */
    -    S = (FT_Short)args[1];
    -    Y = (FT_Long)S;
    -    S = (FT_Short)args[0];
    -    X = (FT_Long)S;
    +    Y = (FT_Short)args[1];
    +    X = (FT_Short)args[0];
     
         Normalize( X, Y, &exc->GS.projVector );
     
    @@ -4169,15 +3979,12 @@
       Ins_SFVFS( TT_ExecContext  exc,
                  FT_Long*        args )
       {
    -    FT_Short  S;
         FT_Long   X, Y;
     
     
         /* Only use low 16bits, then sign extend */
    -    S = (FT_Short)args[1];
    -    Y = (FT_Long)S;
    -    S = (FT_Short)args[0];
    -    X = S;
    +    Y = (FT_Short)args[1];
    +    X = (FT_Short)args[0];
     
         Normalize( X, Y, &exc->GS.freeVector );
         Compute_Funcs( exc );
    @@ -4915,7 +4722,7 @@
           /* compatibility hacks and lets them program points to the grid like */
           /* it's 1996.  They might sign a waiver for just one glyph, though.  */
           if ( SUBPIXEL_HINTING_MINIMAL )
    -        exc->backward_compatibility = !FT_BOOL( L == 4 );
    +        exc->backward_compatibility = ( L & 4 ) ^ 4;
     #endif
         }
         else if ( exc->pedantic_hinting )
    @@ -4999,32 +4806,31 @@
        * Stack:        uint32... -->
        */
       static void
    -  Ins_FLIPPT( TT_ExecContext  exc )
    +  Ins_FLIPPT( TT_ExecContext  exc,
    +              FT_Long*        args )
       {
    +    FT_Long    loop = exc->GS.loop;
         FT_UShort  point;
     
     
    -#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    /* See `ttinterp.h' for details on backward compatibility mode. */
    -    if ( SUBPIXEL_HINTING_MINIMAL    &&
    -         exc->backward_compatibility &&
    -         exc->iupx_called            &&
    -         exc->iupy_called            )
    -      goto Fail;
    -#endif
    -
    -    if ( exc->top < exc->GS.loop )
    +    if ( exc->new_top < loop )
         {
           if ( exc->pedantic_hinting )
             exc->error = FT_THROW( Too_Few_Arguments );
           goto Fail;
         }
     
    -    while ( exc->GS.loop > 0 )
    -    {
    -      exc->args--;
    +    exc->new_top -= loop;
     
    -      point = (FT_UShort)exc->stack[exc->args];
    +#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    +    /* See `ttinterp.h' for details on backward compatibility mode. */
    +    if ( exc->backward_compatibility == 0x7 )
    +      goto Fail;
    +#endif
    +
    +    while ( loop-- )
    +    {
    +      point = (FT_UShort)*(--args);
     
           if ( BOUNDS( point, exc->pts.n_points ) )
           {
    @@ -5036,13 +4842,10 @@
           }
           else
             exc->pts.tags[point] ^= FT_CURVE_TAG_ON;
    -
    -      exc->GS.loop--;
         }
     
       Fail:
         exc->GS.loop = 1;
    -    exc->new_top = exc->args;
       }
     
     
    @@ -5061,10 +4864,7 @@
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
         /* See `ttinterp.h' for details on backward compatibility mode. */
    -    if ( SUBPIXEL_HINTING_MINIMAL    &&
    -         exc->backward_compatibility &&
    -         exc->iupx_called            &&
    -         exc->iupy_called            )
    +    if ( exc->backward_compatibility == 0x7 )
           return;
     #endif
     
    @@ -5099,10 +4899,7 @@
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
         /* See `ttinterp.h' for details on backward compatibility mode. */
    -    if ( SUBPIXEL_HINTING_MINIMAL    &&
    -         exc->backward_compatibility &&
    -         exc->iupx_called            &&
    -         exc->iupy_called            )
    +    if ( exc->backward_compatibility == 0x7 )
           return;
     #endif
     
    @@ -5158,8 +4955,8 @@
     
         d = PROJECT( zp.cur + p, zp.org + p );
     
    -    *x = FT_MulDiv( d, (FT_Long)exc->GS.freeVector.x, exc->F_dot_P );
    -    *y = FT_MulDiv( d, (FT_Long)exc->GS.freeVector.y, exc->F_dot_P );
    +    *x = FT_MulFix( d, exc->moveVector.x );
    +    *y = FT_MulFix( d, exc->moveVector.y );
     
         return SUCCESS;
       }
    @@ -5176,8 +4973,8 @@
         if ( exc->GS.freeVector.x != 0 )
         {
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -      if ( !( SUBPIXEL_HINTING_MINIMAL    &&
    -              exc->backward_compatibility ) )
    +      /* See `ttinterp.h' for details on backward compatibility mode. */
    +      if ( !exc->backward_compatibility )
     #endif
             exc->zp2.cur[point].x = ADD_LONG( exc->zp2.cur[point].x, dx );
     
    @@ -5188,10 +4985,8 @@
         if ( exc->GS.freeVector.y != 0 )
         {
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -      if ( !( SUBPIXEL_HINTING_MINIMAL    &&
    -              exc->backward_compatibility &&
    -              exc->iupx_called            &&
    -              exc->iupy_called            ) )
    +      /* See `ttinterp.h' for details on backward compatibility mode. */
    +      if ( exc->backward_compatibility != 0x7 )
     #endif
             exc->zp2.cur[point].y = ADD_LONG( exc->zp2.cur[point].y, dy );
     
    @@ -5208,8 +5003,10 @@
        * Stack:        uint32... -->
        */
       static void
    -  Ins_SHP( TT_ExecContext  exc )
    +  Ins_SHP( TT_ExecContext  exc,
    +           FT_Long*        args )
       {
    +    FT_Long          loop = exc->GS.loop;
         TT_GlyphZoneRec  zp;
         FT_UShort        refp;
     
    @@ -5217,20 +5014,21 @@
         FT_UShort        point;
     
     
    -    if ( exc->top < exc->GS.loop )
    +    if ( exc->new_top < loop )
         {
           if ( exc->pedantic_hinting )
    -        exc->error = FT_THROW( Invalid_Reference );
    +        exc->error = FT_THROW( Too_Few_Arguments );
           goto Fail;
         }
     
    +    exc->new_top -= loop;
    +
         if ( Compute_Point_Displacement( exc, &dx, &dy, &zp, &refp ) )
           return;
     
    -    while ( exc->GS.loop > 0 )
    +    while ( loop-- )
         {
    -      exc->args--;
    -      point = (FT_UShort)exc->stack[exc->args];
    +      point = (FT_UShort)*(--args);
     
           if ( BOUNDS( point, exc->zp2.n_points ) )
           {
    @@ -5242,13 +5040,10 @@
           }
           else
             Move_Zp2_Point( exc, point, dx, dy, TRUE );
    -
    -      exc->GS.loop--;
         }
     
       Fail:
         exc->GS.loop = 1;
    -    exc->new_top = exc->args;
       }
     
     
    @@ -5364,6 +5159,7 @@
       Ins_SHPIX( TT_ExecContext  exc,
                  FT_Long*        args )
       {
    +    FT_Long     loop = exc->GS.loop;
         FT_F26Dot6  dx, dy;
         FT_UShort   point;
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    @@ -5373,22 +5169,21 @@
     #endif
     
     
    -
    -    if ( exc->top < exc->GS.loop + 1 )
    +    if ( exc->new_top < loop )
         {
           if ( exc->pedantic_hinting )
    -        exc->error = FT_THROW( Invalid_Reference );
    +        exc->error = FT_THROW( Too_Few_Arguments );
           goto Fail;
         }
     
    +    exc->new_top -= loop;
    +
         dx = TT_MulFix14( args[0], exc->GS.freeVector.x );
         dy = TT_MulFix14( args[0], exc->GS.freeVector.y );
     
    -    while ( exc->GS.loop > 0 )
    +    while ( loop-- )
         {
    -      exc->args--;
    -
    -      point = (FT_UShort)exc->stack[exc->args];
    +      point = (FT_UShort)*(--args);
     
           if ( BOUNDS( point, exc->zp2.n_points ) )
           {
    @@ -5400,8 +5195,7 @@
           }
           else
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -      if ( SUBPIXEL_HINTING_MINIMAL    &&
    -           exc->backward_compatibility )
    +      if ( exc->backward_compatibility )
           {
             /* Special case: allow SHPIX to move points in the twilight zone.  */
             /* Otherwise, treat SHPIX the same as DELTAP.  Unbreaks various    */
    @@ -5409,7 +5203,7 @@
             /* that would glitch severely after calling ALIGNRP after a        */
             /* blocked SHPIX.                                                  */
             if ( in_twilight                                                ||
    -             ( !( exc->iupx_called && exc->iupy_called )              &&
    +             ( exc->backward_compatibility != 0x7                     &&
                    ( ( exc->is_composite && exc->GS.freeVector.y != 0 ) ||
                      ( exc->zp2.tags[point] & FT_CURVE_TAG_TOUCH_Y )    ) ) )
               Move_Zp2_Point( exc, point, 0, dy, TRUE );
    @@ -5417,13 +5211,10 @@
           else
     #endif
             Move_Zp2_Point( exc, point, dx, dy, TRUE );
    -
    -      exc->GS.loop--;
         }
     
       Fail:
         exc->GS.loop = 1;
    -    exc->new_top = exc->args;
       }
     
     
    @@ -5502,7 +5293,7 @@
         if ( ( exc->opcode & 1 ) != 0 )
         {
           cur_dist = FAST_PROJECT( &exc->zp0.cur[point] );
    -      distance = SUB_LONG( exc->func_round( exc, cur_dist, 3 ), cur_dist );
    +      distance = SUB_LONG( exc->func_round( exc, cur_dist, 0 ), cur_dist );
         }
         else
           distance = 0;
    @@ -5566,7 +5357,7 @@
         if ( exc->GS.gep0 == 0 )   /* If in twilight zone */
         {
           exc->zp0.org[point].x = TT_MulFix14( distance,
    -                                             exc->GS.freeVector.x );
    +                                           exc->GS.freeVector.x );
           exc->zp0.org[point].y = TT_MulFix14( distance,
                                                exc->GS.freeVector.y );
           exc->zp0.cur[point]   = exc->zp0.org[point];
    @@ -5587,7 +5378,7 @@
           if ( delta > control_value_cutin )
             distance = org_dist;
     
    -      distance = exc->func_round( exc, distance, 3 );
    +      distance = exc->func_round( exc, distance, 0 );
         }
     
         exc->func_move( exc, &exc->zp0, point, SUB_LONG( distance, org_dist ) );
    @@ -5609,7 +5400,7 @@
                 FT_Long*        args )
       {
         FT_UShort   point = 0;
    -    FT_F26Dot6  org_dist, distance;
    +    FT_F26Dot6  org_dist, distance, compensation;
     
     
         point = (FT_UShort)args[0];
    @@ -5664,11 +5455,11 @@
         /* single width cut-in test */
     
         /* |org_dist - single_width_value| < single_width_cutin */
    -    if ( exc->GS.single_width_cutin > 0          &&
    -         org_dist < exc->GS.single_width_value +
    -                      exc->GS.single_width_cutin &&
    -         org_dist > exc->GS.single_width_value -
    -                      exc->GS.single_width_cutin )
    +    if ( exc->GS.single_width_cutin > 0                    &&
    +         org_dist < ADD_LONG( exc->GS.single_width_value,
    +                              exc->GS.single_width_cutin ) &&
    +         org_dist > SUB_LONG( exc->GS.single_width_value,
    +                              exc->GS.single_width_cutin ) )
         {
           if ( org_dist >= 0 )
             org_dist = exc->GS.single_width_value;
    @@ -5678,12 +5469,12 @@
     
         /* round flag */
     
    +    compensation = exc->GS.compensation[exc->opcode & 3];
    +
         if ( ( exc->opcode & 4 ) != 0 )
    -    {
    -      distance = exc->func_round( exc, org_dist, exc->opcode & 3 );
    -    }
    +      distance = exc->func_round( exc, org_dist, compensation );
         else
    -      distance = Round_None( exc, org_dist, exc->opcode & 3 );
    +      distance = Round_None( exc, org_dist, compensation );
     
         /* minimum distance flag */
     
    @@ -5735,7 +5526,8 @@
         FT_F26Dot6  cvt_dist,
                     distance,
                     cur_dist,
    -                org_dist;
    +                org_dist,
    +                compensation;
     
         FT_F26Dot6  delta;
     
    @@ -5801,6 +5593,8 @@
     
         /* control value cut-in and round */
     
    +    compensation = exc->GS.compensation[exc->opcode & 3];
    +
         if ( ( exc->opcode & 4 ) != 0 )
         {
           /* XXX: UNDOCUMENTED!  Only perform cut-in test when both points */
    @@ -5831,16 +5625,16 @@
               cvt_dist = org_dist;
           }
     
    -      distance = exc->func_round( exc, cvt_dist, exc->opcode & 3 );
    +      distance = exc->func_round( exc, cvt_dist, compensation );
         }
         else
    -      distance = Round_None( exc, cvt_dist, exc->opcode & 3 );
    +      distance = Round_None( exc, cvt_dist, compensation );
     
         /* minimum distance test */
     
         if ( ( exc->opcode & 8 ) != 0 )
         {
    -      FT_F26Dot6  minimum_distance    = exc->GS.minimum_distance;
    +      FT_F26Dot6  minimum_distance = exc->GS.minimum_distance;
     
     
           if ( org_dist >= 0 )
    @@ -5862,11 +5656,10 @@
     
       Fail:
         exc->GS.rp1 = exc->GS.rp0;
    +    exc->GS.rp2 = point;
     
         if ( ( exc->opcode & 16 ) != 0 )
           exc->GS.rp0 = point;
    -
    -    exc->GS.rp2 = point;
       }
     
     
    @@ -5877,25 +5670,33 @@
        * Stack:        uint32 uint32... -->
        */
       static void
    -  Ins_ALIGNRP( TT_ExecContext  exc )
    +  Ins_ALIGNRP( TT_ExecContext  exc,
    +               FT_Long*        args )
       {
    +    FT_Long     loop = exc->GS.loop;
         FT_UShort   point;
         FT_F26Dot6  distance;
     
     
    -    if ( exc->top < exc->GS.loop                  ||
    -         BOUNDS( exc->GS.rp0, exc->zp0.n_points ) )
    +    if ( exc->new_top < loop )
    +    {
    +      if ( exc->pedantic_hinting )
    +        exc->error = FT_THROW( Too_Few_Arguments );
    +      goto Fail;
    +    }
    +
    +    exc->new_top -= loop;
    +
    +    if ( BOUNDS( exc->GS.rp0, exc->zp0.n_points ) )
         {
           if ( exc->pedantic_hinting )
             exc->error = FT_THROW( Invalid_Reference );
           goto Fail;
         }
     
    -    while ( exc->GS.loop > 0 )
    +    while ( loop-- )
         {
    -      exc->args--;
    -
    -      point = (FT_UShort)exc->stack[exc->args];
    +      point = (FT_UShort)*(--args);
     
           if ( BOUNDS( point, exc->zp1.n_points ) )
           {
    @@ -5912,13 +5713,10 @@
     
             exc->func_move( exc, &exc->zp1, point, NEG_LONG( distance ) );
           }
    -
    -      exc->GS.loop--;
         }
     
       Fail:
         exc->GS.loop = 1;
    -    exc->new_top = exc->args;
       }
     
     
    @@ -6060,15 +5858,26 @@
       /* SOMETIMES, DUMBER CODE IS BETTER CODE */
     
       static void
    -  Ins_IP( TT_ExecContext  exc )
    +  Ins_IP( TT_ExecContext  exc,
    +          FT_Long*        args )
       {
    +    FT_Long     loop = exc->GS.loop;
         FT_F26Dot6  old_range, cur_range;
         FT_Vector*  orus_base;
         FT_Vector*  cur_base;
         FT_Int      twilight;
     
     
    -    if ( exc->top < exc->GS.loop )
    +    if ( exc->new_top < loop )
    +    {
    +      if ( exc->pedantic_hinting )
    +        exc->error = FT_THROW( Too_Few_Arguments );
    +      goto Fail;
    +    }
    +
    +    exc->new_top -= loop;
    +
    +    if ( BOUNDS( exc->GS.rp1, exc->zp0.n_points ) )
         {
           if ( exc->pedantic_hinting )
             exc->error = FT_THROW( Invalid_Reference );
    @@ -6084,13 +5893,6 @@
                      exc->GS.gep1 == 0 ||
                      exc->GS.gep2 == 0 );
     
    -    if ( BOUNDS( exc->GS.rp1, exc->zp0.n_points ) )
    -    {
    -      if ( exc->pedantic_hinting )
    -        exc->error = FT_THROW( Invalid_Reference );
    -      goto Fail;
    -    }
    -
         if ( twilight )
           orus_base = &exc->zp0.org[exc->GS.rp1];
         else
    @@ -6102,8 +5904,7 @@
         /*      fonts out there (e.g. [aeu]grave in monotype.ttf)   */
         /*      calling IP[] with bad values of rp[12].             */
         /*      Do something sane when this odd thing happens.      */
    -    if ( BOUNDS( exc->GS.rp1, exc->zp0.n_points ) ||
    -         BOUNDS( exc->GS.rp2, exc->zp1.n_points ) )
    +    if ( BOUNDS( exc->GS.rp2, exc->zp1.n_points ) )
         {
           old_range = 0;
           cur_range = 0;
    @@ -6132,9 +5933,9 @@
           cur_range = PROJECT( &exc->zp1.cur[exc->GS.rp2], cur_base );
         }
     
    -    for ( ; exc->GS.loop > 0; exc->GS.loop-- )
    +    while ( loop-- )
         {
    -      FT_UInt     point = (FT_UInt)exc->stack[--exc->args];
    +      FT_UInt     point = (FT_UInt)*(--args);
           FT_F26Dot6  org_dist, cur_dist, new_dist;
     
     
    @@ -6206,7 +6007,6 @@
     
       Fail:
         exc->GS.loop = 1;
    -    exc->new_top = exc->args;
       }
     
     
    @@ -6405,17 +6205,10 @@
         /* See `ttinterp.h' for details on backward compatibility mode.  */
         /* Allow IUP until it has been called on both axes.  Immediately */
         /* return on subsequent ones.                                    */
    -    if ( SUBPIXEL_HINTING_MINIMAL    &&
    -         exc->backward_compatibility )
    -    {
    -      if ( exc->iupx_called && exc->iupy_called )
    -        return;
    -
    -      if ( exc->opcode & 1 )
    -        exc->iupx_called = TRUE;
    -      else
    -        exc->iupy_called = TRUE;
    -    }
    +    if ( exc->backward_compatibility == 0x7 )
    +      return;
    +    else if ( exc->backward_compatibility )
    +      exc->backward_compatibility |= 1 << ( exc->opcode & 1 );
     #endif
     
         /* ignore empty outlines */
    @@ -6507,30 +6300,50 @@
       Ins_DELTAP( TT_ExecContext  exc,
                   FT_Long*        args )
       {
    -    FT_ULong   nump, k;
    +    FT_Long    nump;
         FT_UShort  A;
    -    FT_ULong   C, P;
    -    FT_Long    B;
    +    FT_Long    B, P, F;
     
     
    -    P    = (FT_ULong)exc->func_cur_ppem( exc );
    -    nump = (FT_ULong)args[0];   /* some points theoretically may occur more
    -                                   than once, thus UShort isn't enough */
    +    nump = args[0];  /* signed value for convenience */
     
    -    for ( k = 1; k <= nump; k++ )
    +    if ( nump < 0 || nump > exc->new_top / 2 )
         {
    -      if ( exc->args < 2 )
    -      {
    -        if ( exc->pedantic_hinting )
    -          exc->error = FT_THROW( Too_Few_Arguments );
    -        exc->args = 0;
    -        goto Fail;
    -      }
    +      if ( exc->pedantic_hinting )
    +        exc->error = FT_THROW( Too_Few_Arguments );
     
    -      exc->args -= 2;
    +      nump = exc->new_top / 2;
    +    }
     
    -      A = (FT_UShort)exc->stack[exc->args + 1];
    -      B = exc->stack[exc->args];
    +    exc->new_top -= 2 * nump;
    +
    +    P = exc->func_cur_ppem( exc ) - exc->GS.delta_base;
    +
    +    switch ( exc->opcode )
    +    {
    +    case 0x5D:
    +      break;
    +
    +    case 0x71:
    +      P -= 16;
    +      break;
    +
    +    case 0x72:
    +      P -= 32;
    +      break;
    +    }
    +
    +    /* check applicable range of adjusted ppem */
    +    if ( P & ~0xF )         /* P < 0 || P > 15 */
    +      return;
    +
    +    P <<= 4;
    +    F   = 1L << ( 6 - exc->GS.delta_shift );
    +
    +    while ( nump-- )
    +    {
    +      A = (FT_UShort)*(--args);
    +      B = *(--args);
     
           /* XXX: Because some popular fonts contain some invalid DeltaP */
           /*      instructions, we simply ignore them when the stacked   */
    @@ -6538,41 +6351,28 @@
           /*      error.  As a delta instruction doesn't change a glyph  */
           /*      in great ways, this shouldn't be a problem.            */
     
    -      if ( !BOUNDS( A, exc->zp0.n_points ) )
    +      if ( BOUNDS( A, exc->zp0.n_points ) )
           {
    -        C = ( (FT_ULong)B & 0xF0 ) >> 4;
    -
    -        switch ( exc->opcode )
    +        if ( exc->pedantic_hinting )
             {
    -        case 0x5D:
    -          break;
    -
    -        case 0x71:
    -          C += 16;
    -          break;
    -
    -        case 0x72:
    -          C += 32;
    -          break;
    +          exc->error = FT_THROW( Invalid_Reference );
    +          return;
             }
    -
    -        C += exc->GS.delta_base;
    -
    -        if ( P == C )
    +      }
    +      else
    +      {
    +        if ( ( B & 0xF0 ) == P )
             {
    -          B = ( (FT_ULong)B & 0xF ) - 8;
    +          B = ( B & 0xF ) - 8;
               if ( B >= 0 )
                 B++;
    -          B *= 1L << ( 6 - exc->GS.delta_shift );
    -
    +          B *= F;
     
     #ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -          /* See `ttinterp.h' for details on backward compatibility */
    -          /* mode.                                                  */
    -          if ( SUBPIXEL_HINTING_MINIMAL    &&
    -               exc->backward_compatibility )
    +          /* See `ttinterp.h' for details on backward compatibility mode. */
    +          if ( exc->backward_compatibility )
               {
    -            if ( !( exc->iupx_called && exc->iupy_called )              &&
    +            if ( exc->backward_compatibility != 0x7                     &&
                      ( ( exc->is_composite && exc->GS.freeVector.y != 0 ) ||
                        ( exc->zp0.tags[A] & FT_CURVE_TAG_TOUCH_Y )        ) )
                   exc->func_move( exc, &exc->zp0, A, B );
    @@ -6582,13 +6382,7 @@
                 exc->func_move( exc, &exc->zp0, A, B );
             }
           }
    -      else
    -        if ( exc->pedantic_hinting )
    -          exc->error = FT_THROW( Invalid_Reference );
         }
    -
    -  Fail:
    -    exc->new_top = exc->args;
       }
     
     
    @@ -6602,28 +6396,50 @@
       Ins_DELTAC( TT_ExecContext  exc,
                   FT_Long*        args )
       {
    -    FT_ULong  nump, k;
    -    FT_ULong  A, C, P;
    -    FT_Long   B;
    +    FT_Long   nump;
    +    FT_ULong  A;
    +    FT_Long   B, P, F;
     
     
    -    P    = (FT_ULong)exc->func_cur_ppem( exc );
    -    nump = (FT_ULong)args[0];
    +    nump = args[0];  /* signed value for convenience */
     
    -    for ( k = 1; k <= nump; k++ )
    +    if ( nump < 0 || nump > exc->new_top / 2 )
         {
    -      if ( exc->args < 2 )
    -      {
    -        if ( exc->pedantic_hinting )
    -          exc->error = FT_THROW( Too_Few_Arguments );
    -        exc->args = 0;
    -        goto Fail;
    -      }
    +      if ( exc->pedantic_hinting )
    +        exc->error = FT_THROW( Too_Few_Arguments );
     
    -      exc->args -= 2;
    +      nump = exc->new_top / 2;
    +    }
     
    -      A = (FT_ULong)exc->stack[exc->args + 1];
    -      B = exc->stack[exc->args];
    +    exc->new_top -= 2 * nump;
    +
    +    P = exc->func_cur_ppem( exc ) - exc->GS.delta_base;
    +
    +    switch ( exc->opcode )
    +    {
    +    case 0x73:
    +      break;
    +
    +    case 0x74:
    +      P -= 16;
    +      break;
    +
    +    case 0x75:
    +      P -= 32;
    +      break;
    +    }
    +
    +    /* check applicable range of adjusted ppem */
    +    if ( P & ~0xF )         /* P < 0 || P > 15 */
    +      return;
    +
    +    P <<= 4;
    +    F   = 1L << ( 6 - exc->GS.delta_shift );
    +
    +    while ( nump-- )
    +    {
    +      A = (FT_ULong)*(--args);
    +      B = *(--args);
     
           if ( BOUNDSL( A, exc->cvtSize ) )
           {
    @@ -6635,38 +6451,17 @@
           }
           else
           {
    -        C = ( (FT_ULong)B & 0xF0 ) >> 4;
    -
    -        switch ( exc->opcode )
    +        if ( ( B & 0xF0 ) == P )
             {
    -        case 0x73:
    -          break;
    -
    -        case 0x74:
    -          C += 16;
    -          break;
    -
    -        case 0x75:
    -          C += 32;
    -          break;
    -        }
    -
    -        C += exc->GS.delta_base;
    -
    -        if ( P == C )
    -        {
    -          B = ( (FT_ULong)B & 0xF ) - 8;
    +          B = ( B & 0xF ) - 8;
               if ( B >= 0 )
                 B++;
    -          B *= 1L << ( 6 - exc->GS.delta_shift );
    +          B *= F;
     
               exc->func_move_cvt( exc, A, B );
             }
           }
         }
    -
    -  Fail:
    -    exc->new_top = exc->args;
       }
     
     
    @@ -6736,7 +6531,7 @@
         /* Otherwise, instructions may behave weirdly and rendering results */
         /* may differ between v35 and v40 mode, e.g., in `Times New Roman   */
         /* Bold Italic'. */
    -    if ( SUBPIXEL_HINTING_MINIMAL && exc->subpixel_hinting_lean )
    +    if ( SUBPIXEL_HINTING_MINIMAL && exc->mode != FT_RENDER_MODE_MONO )
         {
           /*********************************
            * HINTING FOR SUBPIXEL
    @@ -6753,7 +6548,7 @@
            * Selector Bit:  8
            * Return Bit(s): 15
            */
    -      if ( ( args[0] & 256 ) != 0 && exc->vertical_lcd_lean )
    +      if ( ( args[0] & 256 ) != 0 && exc->mode == FT_RENDER_MODE_LCD_V )
             K |= 1 << 15;
     
           /*********************************
    @@ -6774,7 +6569,7 @@
            * The only smoothing method FreeType supports unless someone sets
            * FT_LOAD_TARGET_MONO.
            */
    -      if ( ( args[0] & 2048 ) != 0 && exc->subpixel_hinting_lean )
    +      if ( ( args[0] & 2048 ) != 0 && exc->mode != FT_RENDER_MODE_MONO )
             K |= 1 << 18;
     
           /*********************************
    @@ -6786,7 +6581,10 @@
            * Grayscale rendering is what FreeType does anyway unless someone
            * sets FT_LOAD_TARGET_MONO or FT_LOAD_TARGET_LCD(_V)
            */
    -      if ( ( args[0] & 4096 ) != 0 && exc->grayscale_cleartype )
    +      if ( ( args[0] & 4096 ) != 0           &&
    +           exc->mode != FT_RENDER_MODE_MONO  &&
    +           exc->mode != FT_RENDER_MODE_LCD   &&
    +           exc->mode != FT_RENDER_MODE_LCD_V )
             K |= 1 << 19;
         }
     #endif
    @@ -6833,6 +6631,8 @@
           for ( i = 0; i < num_axes; i++ )
             args[i] = 0;
         }
    +
    +    exc->new_top += num_axes;
       }
     
     
    @@ -6883,7 +6683,6 @@
     
             Ins_Goto_CodeRange( exc, def->range, def->start );
     
    -        exc->step_ins = FALSE;
             return;
           }
         }
    @@ -6928,96 +6727,22 @@
       TT_RunIns( void*  exec )
       {
         TT_ExecContext  exc = (TT_ExecContext)exec;
    +    FT_ULong        ins_counter = 0;
     
    -    FT_ULong   ins_counter = 0;  /* executed instructions counter */
    -    FT_ULong   num_twilight_points;
    -    FT_UShort  i;
    -
    -
    -    /* We restrict the number of twilight points to a reasonable,     */
    -    /* heuristic value to avoid slow execution of malformed bytecode. */
    -    num_twilight_points = FT_MAX( 30,
    -                                  2 * ( exc->pts.n_points + exc->cvtSize ) );
    -    if ( exc->twilight.n_points > num_twilight_points )
    -    {
    -      if ( num_twilight_points > 0xFFFFU )
    -        num_twilight_points = 0xFFFFU;
    -
    -      FT_TRACE5(( "TT_RunIns: Resetting number of twilight points\n" ));
    -      FT_TRACE5(( "           from %d to the more reasonable value %ld\n",
    -                  exc->twilight.n_points,
    -                  num_twilight_points ));
    -      exc->twilight.n_points = (FT_UShort)num_twilight_points;
    -    }
    -
    -    /* Set up loop detectors.  We restrict the number of LOOPCALL loops */
    -    /* and the number of JMPR, JROT, and JROF calls with a negative     */
    -    /* argument to values that depend on various parameters like the    */
    -    /* size of the CVT table or the number of points in the current     */
    -    /* glyph (if applicable).                                           */
    -    /*                                                                  */
    -    /* The idea is that in real-world bytecode you either iterate over  */
    -    /* all CVT entries (in the `prep' table), or over all points (or    */
    -    /* contours, in the `glyf' table) of a glyph, and such iterations   */
    -    /* don't happen very often.                                         */
    -    exc->loopcall_counter = 0;
    -    exc->neg_jump_counter = 0;
    -
    -    /* The maximum values are heuristic. */
    -    if ( exc->pts.n_points )
    -      exc->loopcall_counter_max = FT_MAX( 50,
    -                                          10 * exc->pts.n_points ) +
    -                                  FT_MAX( 50,
    -                                          exc->cvtSize / 10 );
    -    else
    -      exc->loopcall_counter_max = 300 + 22 * exc->cvtSize;
    -
    -    /* as a protection against an unreasonable number of CVT entries  */
    -    /* we assume at most 100 control values per glyph for the counter */
    -    if ( exc->loopcall_counter_max >
    -         100 * (FT_ULong)exc->face->root.num_glyphs )
    -      exc->loopcall_counter_max = 100 * (FT_ULong)exc->face->root.num_glyphs;
    -
    -    FT_TRACE5(( "TT_RunIns: Limiting total number of loops in LOOPCALL"
    -                " to %ld\n", exc->loopcall_counter_max ));
    -
    -    exc->neg_jump_counter_max = exc->loopcall_counter_max;
    -    FT_TRACE5(( "TT_RunIns: Limiting total number of backward jumps"
    -                " to %ld\n", exc->neg_jump_counter_max ));
    -
    -    /* set PPEM and CVT functions */
    -    exc->tt_metrics.ratio = 0;
    -    if ( exc->metrics.x_ppem != exc->metrics.y_ppem )
    -    {
    -      /* non-square pixels, use the stretched routines */
    -      exc->func_cur_ppem  = Current_Ppem_Stretched;
    -      exc->func_read_cvt  = Read_CVT_Stretched;
    -      exc->func_write_cvt = Write_CVT_Stretched;
    -      exc->func_move_cvt  = Move_CVT_Stretched;
    -    }
    -    else
    -    {
    -      /* square pixels, use normal routines */
    -      exc->func_cur_ppem  = Current_Ppem;
    -      exc->func_read_cvt  = Read_CVT;
    -      exc->func_write_cvt = Write_CVT;
    -      exc->func_move_cvt  = Move_CVT;
    -    }
    -
    -    exc->iniRange    = exc->curRange;
    -
    -    Compute_Funcs( exc );
    -    Compute_Round( exc, (FT_Byte)exc->GS.round_state );
    -
    -    /* These flags cancel execution of some opcodes after IUP is called */
    -#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    -    exc->iupx_called = FALSE;
    -    exc->iupy_called = FALSE;
    -#endif
     
         do
         {
    +      /* increment instruction counter and check if we didn't */
    +      /* run this program for too long (e.g. infinite loops). */
    +      if ( ++ins_counter > TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES )
    +      {
    +        exc->error = FT_THROW( Execution_Too_Long );
    +        goto LErrorLabel_;
    +      }
    +
    +      exc->error  = FT_Err_Ok;
           exc->opcode = exc->code[exc->IP];
    +      exc->length = 1;
     
     #ifdef FT_DEBUG_LEVEL_TRACE
           if ( ft_trace_levels[trace_ttinterp] >= 6 )
    @@ -7041,17 +6766,6 @@
           }
     #endif /* FT_DEBUG_LEVEL_TRACE */
     
    -      if ( ( exc->length = opcode_length[exc->opcode] ) < 0 )
    -      {
    -        if ( exc->IP + 1 >= exc->codeSize )
    -          goto LErrorCodeOverflow_;
    -
    -        exc->length = 2 - exc->length * exc->code[exc->IP + 1];
    -      }
    -
    -      if ( exc->IP + exc->length > exc->codeSize )
    -        goto LErrorCodeOverflow_;
    -
           /* First, let's check for empty stack and overflow */
           exc->args = exc->top - ( Pop_Push_Count[exc->opcode] >> 4 );
     
    @@ -7059,6 +6773,9 @@
           /* One can also interpret it as the index of the last argument.    */
           if ( exc->args < 0 )
           {
    +        FT_UShort  i;
    +
    +
             if ( exc->pedantic_hinting )
             {
               exc->error = FT_THROW( Too_Few_Arguments );
    @@ -7071,21 +6788,7 @@
             exc->args = 0;
           }
     
    -#ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT
    -      if ( exc->opcode == 0x91 )
    -      {
    -        /* this is very special: GETVARIATION returns */
    -        /* a variable number of arguments             */
    -
    -        /* it is the job of the application to `activate' GX handling, */
    -        /* that is, calling any of the GX API functions on the current */
    -        /* font to select a variation instance                         */
    -        if ( exc->face->blend )
    -          exc->new_top = exc->args + exc->face->blend->num_axis;
    -      }
    -      else
    -#endif
    -        exc->new_top = exc->args + ( Pop_Push_Count[exc->opcode] & 15 );
    +      exc->new_top = exc->args + ( Pop_Push_Count[exc->opcode] & 15 );
     
           /* `new_top' is the new top of the stack, after the instruction's */
           /* execution.  `top' will be set to `new_top' after the `switch'  */
    @@ -7096,9 +6799,6 @@
             goto LErrorLabel_;
           }
     
    -      exc->step_ins = TRUE;
    -      exc->error    = FT_Err_Ok;
    -
           {
             FT_Long*  args   = exc->stack + exc->args;
             FT_Byte   opcode = exc->opcode;
    @@ -7281,7 +6981,7 @@
     
             case 0x32:  /* SHP */
             case 0x33:  /* SHP */
    -          Ins_SHP( exc );
    +          Ins_SHP( exc, args );
               break;
     
             case 0x34:  /* SHC */
    @@ -7299,7 +6999,7 @@
               break;
     
             case 0x39:  /* IP    */
    -          Ins_IP( exc );
    +          Ins_IP( exc, args );
               break;
     
             case 0x3A:  /* MSIRP */
    @@ -7308,7 +7008,7 @@
               break;
     
             case 0x3C:  /* AlignRP */
    -          Ins_ALIGNRP( exc );
    +          Ins_ALIGNRP( exc, args );
               break;
     
             case 0x3D:  /* RTDG */
    @@ -7544,7 +7244,7 @@
               break;
     
             case 0x80:  /* FLIPPT */
    -          Ins_FLIPPT( exc );
    +          Ins_FLIPPT( exc, args );
               break;
     
             case 0x81:  /* FLIPRGON */
    @@ -7642,13 +7342,13 @@
           {
             switch ( exc->error )
             {
    -          /* looking for redefined instructions */
             case FT_ERR( Invalid_Opcode ):
               {
                 TT_DefRecord*  def   = exc->IDefs;
                 TT_DefRecord*  limit = FT_OFFSET( def, exc->numIDefs );
     
     
    +            /* looking for redefined instructions */
                 for ( ; def < limit; def++ )
                 {
                   if ( def->active && exc->opcode == (FT_Byte)def->opc )
    @@ -7678,37 +7378,15 @@
                   }
                 }
               }
    -
    -          exc->error = FT_THROW( Invalid_Opcode );
    -          goto LErrorLabel_;
    -
    -#if 0
    -          break;   /* Unreachable code warning suppression.             */
    -                   /* Leave to remind in case a later change the editor */
    -                   /* to consider break;                                */
    -#endif
    +          FALL_THROUGH;
     
             default:
               goto LErrorLabel_;
    -
    -#if 0
    -        break;
    -#endif
             }
           }
     
           exc->top = exc->new_top;
    -
    -      if ( exc->step_ins )
    -        exc->IP += exc->length;
    -
    -      /* increment instruction counter and check if we didn't */
    -      /* run this program for too long (e.g. infinite loops). */
    -      if ( ++ins_counter > TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES )
    -      {
    -        exc->error = FT_THROW( Execution_Too_Long );
    -        goto LErrorLabel_;
    -      }
    +      exc->IP += exc->length;
     
         LSuiteLabel_:
           if ( exc->IP >= exc->codeSize )
    @@ -7724,15 +7402,12 @@
         } while ( !exc->instruction_trap );
     
       LNo_Error_:
    -    FT_TRACE4(( "  %ld instruction%s executed\n",
    +    FT_TRACE4(( "  %lu instruction%s executed\n",
                     ins_counter,
                     ins_counter == 1 ? "" : "s" ));
     
         return FT_Err_Ok;
     
    -  LErrorCodeOverflow_:
    -    exc->error = FT_THROW( Code_Overflow );
    -
       LErrorLabel_:
         if ( exc->error && !exc->instruction_trap )
           FT_TRACE1(( "  The interpreter returned error 0x%x\n", exc->error ));
    @@ -7740,6 +7415,126 @@
         return exc->error;
       }
     
    +
    +  /**************************************************************************
    +   *
    +   * @Function:
    +   *   TT_Run_Context
    +   *
    +   * @Description:
    +   *   Executes one or more instructions in the execution context.
    +   *
    +   * @Input:
    +   *   exec ::
    +   *     A handle to the target execution context.
    +   *
    +   * @Return:
    +   *   TrueType error code.  0 means success.
    +   */
    +  FT_LOCAL_DEF( FT_Error )
    +  TT_Run_Context( TT_ExecContext  exec,
    +                  TT_Size         size )
    +  {
    +    FT_ULong   num_twilight_points;
    +
    +
    +    exec->zp0 = exec->pts;
    +    exec->zp1 = exec->pts;
    +    exec->zp2 = exec->pts;
    +
    +    /* We restrict the number of twilight points to a reasonable,     */
    +    /* heuristic value to avoid slow execution of malformed bytecode. */
    +    /* The selected value is large enough to support fonts hinted     */
    +    /* with `ttfautohint`, which uses twilight points to store        */
    +    /* vertical coordinates of (auto-hinter) segments.                */
    +    num_twilight_points = FT_MAX( 30,
    +                                  2 * ( exec->pts.n_points + exec->cvtSize ) );
    +    if ( exec->twilight.n_points > num_twilight_points )
    +    {
    +      if ( num_twilight_points > 0xFFFFU )
    +        num_twilight_points = 0xFFFFU;
    +
    +      FT_TRACE5(( "TT_RunIns: Resetting number of twilight points\n" ));
    +      FT_TRACE5(( "           from %d to the more reasonable value %lu\n",
    +                  exec->twilight.n_points,
    +                  num_twilight_points ));
    +      exec->twilight.n_points = (FT_UShort)num_twilight_points;
    +    }
    +
    +    /* Set up loop detectors.  We restrict the number of LOOPCALL loops */
    +    /* and the number of JMPR, JROT, and JROF calls with a negative     */
    +    /* argument to values that depend on various parameters like the    */
    +    /* size of the CVT table or the number of points in the current     */
    +    /* glyph (if applicable).                                           */
    +    /*                                                                  */
    +    /* The idea is that in real-world bytecode you either iterate over  */
    +    /* all CVT entries (in the `prep' table), or over all points (or    */
    +    /* contours, in the `glyf' table) of a glyph, and such iterations   */
    +    /* don't happen very often.                                         */
    +    exec->loopcall_counter = 0;
    +    exec->neg_jump_counter = 0;
    +
    +    /* The maximum values are heuristic. */
    +    if ( exec->pts.n_points )
    +      exec->loopcall_counter_max = FT_MAX( 50,
    +                                           10 * exec->pts.n_points ) +
    +                                   FT_MAX( 50,
    +                                           exec->cvtSize / 10 );
    +    else
    +      exec->loopcall_counter_max = 300 + 22 * exec->cvtSize;
    +
    +    /* as a protection against an unreasonable number of CVT entries  */
    +    /* we assume at most 100 control values per glyph for the counter */
    +    if ( exec->loopcall_counter_max >
    +         100 * (FT_ULong)exec->face->root.num_glyphs )
    +      exec->loopcall_counter_max = 100 * (FT_ULong)exec->face->root.num_glyphs;
    +
    +    FT_TRACE5(( "TT_RunIns: Limiting total number of loops in LOOPCALL"
    +                " to %lu\n", exec->loopcall_counter_max ));
    +
    +    exec->neg_jump_counter_max = exec->loopcall_counter_max;
    +    FT_TRACE5(( "TT_RunIns: Limiting total number of backward jumps"
    +                " to %lu\n", exec->neg_jump_counter_max ));
    +
    +    /* set PPEM and CVT functions */
    +    if ( exec->metrics.x_ppem != exec->metrics.y_ppem )
    +    {
    +      /* non-square pixels, use the stretched routines */
    +      exec->func_cur_ppem  = Current_Ppem_Stretched;
    +      exec->func_read_cvt  = Read_CVT_Stretched;
    +      exec->func_write_cvt = Write_CVT_Stretched;
    +      exec->func_move_cvt  = Move_CVT_Stretched;
    +    }
    +    else
    +    {
    +      /* square pixels, use normal routines */
    +      exec->func_cur_ppem  = Current_Ppem;
    +      exec->func_read_cvt  = Read_CVT;
    +      exec->func_write_cvt = Write_CVT;
    +      exec->func_move_cvt  = Move_CVT;
    +    }
    +
    +    /* reset graphics state */
    +    exec->GS         = size->GS;
    +    exec->func_round = (TT_Round_Func)Round_To_Grid;
    +    Compute_Funcs( exec );
    +
    +#ifdef TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL
    +    /* Reset IUP tracking bits in the backward compatibility mode. */
    +    /* See `ttinterp.h' for details.                               */
    +    exec->backward_compatibility &= ~0x3;
    +#endif
    +
    +    /* some glyphs leave something on the stack, */
    +    /* so we clean it before a new execution.    */
    +    exec->top     = 0;
    +    exec->callTop = 0;
    +
    +    exec->instruction_trap = FALSE;
    +
    +    return exec->interpreter( exec );
    +  }
    +
     #else /* !TT_USE_BYTECODE_INTERPRETER */
     
       /* ANSI C doesn't like empty source files */
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.h
    index 4f1a9bbc679..5cdc8f59f1a 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttinterp.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType bytecode interpreter (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -39,6 +39,60 @@ FT_BEGIN_HEADER
     #define TT_Round_Super_45        7
     
     
    +  /**************************************************************************
    +   *
    +   * EXECUTION SUBTABLES
    +   *
    +   * These sub-tables relate to instruction execution.
    +   *
    +   */
    +
    +
    +#define TT_MAX_CODE_RANGES  3
    +
    +
    +  /**************************************************************************
    +   *
    +   * There can only be 3 active code ranges at once:
    +   *   - the Font Program
    +   *   - the CVT Program
    +   *   - a glyph's instructions set
    +   */
    +  typedef enum  TT_CodeRange_Tag_
    +  {
    +    tt_coderange_none = 0,
    +    tt_coderange_font,
    +    tt_coderange_cvt,
    +    tt_coderange_glyph
    +
    +  } TT_CodeRange_Tag;
    +
    +
    +  typedef struct  TT_CodeRange_
    +  {
    +    FT_Byte*  base;
    +    FT_Long   size;
    +
    +  } TT_CodeRange;
    +
    +  typedef TT_CodeRange  TT_CodeRangeTable[TT_MAX_CODE_RANGES];
    +
    +
    +  /**************************************************************************
    +   *
    +   * Defines a function/instruction definition record.
    +   */
    +  typedef struct  TT_DefRecord_
    +  {
    +    FT_Int    range;          /* in which code range is it located?     */
    +    FT_Long   start;          /* where does it start?                   */
    +    FT_Long   end;            /* where does it end?                     */
    +    FT_UInt   opc;            /* function #, or instruction code        */
    +    FT_Bool   active;         /* is it active?                          */
    +
    +  } TT_DefRecord, *TT_DefArray;
    +
    +
       /**************************************************************************
        *
        * Function types used by the interpreter, depending on various modes
    @@ -51,7 +105,7 @@ FT_BEGIN_HEADER
       typedef FT_F26Dot6
       (*TT_Round_Func)( TT_ExecContext  exc,
                         FT_F26Dot6      distance,
    -                    FT_Int          color );
    +                    FT_F26Dot6      compensation );
     
       /* Point displacement along the freedom vector routine */
       typedef void
    @@ -111,12 +165,13 @@ FT_BEGIN_HEADER
         TT_Face            face;       /* ! */
         TT_Size            size;       /* ! */
         FT_Memory          memory;
    +    TT_Interpreter     interpreter;
     
         /* instructions state */
     
         FT_Error           error;      /* last execution error */
     
    -    FT_Long            top;        /* @ top of exec. stack */
    +    FT_Long            top;        /* @! top of exec. stack */
     
         FT_Long            stackSize;  /* ! size of exec. stack */
         FT_Long*           stack;      /* ! current exec. stack */
    @@ -142,11 +197,9 @@ FT_BEGIN_HEADER
         FT_Long            IP;        /* current instruction pointer */
         FT_Long            codeSize;  /* size of current range       */
     
    -    FT_Byte            opcode;    /* current opcode              */
    -    FT_Int             length;    /* length of current opcode    */
    +    FT_Byte            opcode;    /* current opcode             */
    +    FT_Int             length;    /* opcode length or increment */
     
    -    FT_Bool            step_ins;  /* true if the interpreter must */
    -                                  /* increment IP after ins. exec */
         FT_ULong           cvtSize;   /* ! */
         FT_Long*           cvt;       /* ! */
         FT_ULong           glyfCvtSize;
    @@ -166,9 +219,9 @@ FT_BEGIN_HEADER
         FT_UInt            maxFunc;   /* ! maximum function index    */
         FT_UInt            maxIns;    /* ! maximum instruction index */
     
    -    FT_Int             callTop,    /* @ top of call stack during execution */
    -                       callSize;   /*   size of call stack                 */
    -    TT_CallStack       callStack;  /*   call stack                         */
    +    FT_Int             callTop,    /* @! top of call stack during execution */
    +                       callSize;   /*    size of call stack                 */
    +    TT_CallStack       callStack;  /*    call stack                         */
     
         FT_UShort          maxPoints;    /* capacity of this context's `pts' */
         FT_Short           maxContours;  /* record, expressed in points and  */
    @@ -189,16 +242,14 @@ FT_BEGIN_HEADER
         FT_Bool            instruction_trap; /* ! If `True', the interpreter   */
                                              /*   exits after each instruction */
     
    -    TT_GraphicsState   default_GS;       /* graphics state resulting from   */
    -                                         /* the prep program                */
         FT_Bool            is_composite;     /* true if the glyph is composite  */
         FT_Bool            pedantic_hinting; /* true if pedantic interpretation */
     
         /* latest interpreter additions */
     
    -    FT_Long            F_dot_P;    /* dot product of freedom and projection */
    -                                   /* vectors                               */
    -    TT_Round_Func      func_round; /* current rounding function             */
    +    TT_Round_Func      func_round;     /* current rounding function   */
    +
    +    FT_Vector          moveVector;     /* "projected" freedom vector  */
     
         TT_Project_Func    func_project,   /* current projection function */
                            func_dualproj,  /* current dual proj. function */
    @@ -327,34 +378,13 @@ FT_BEGIN_HEADER
          *
          */
     
    -    /* Using v40 implies subpixel hinting, unless FT_RENDER_MODE_MONO has been
    -     * requested.  Used to detect interpreter */
    -    /* version switches.  `_lean' to differentiate from the Infinality */
    -    /* `subpixel_hinting', which is managed differently.               */
    -    FT_Bool            subpixel_hinting_lean;
    +    /* Activate backward compatibility (bit 2) and track IUP (bits 0-1). */
    +    /* If this is zero, it means that the interpreter is either in v35   */
    +    /* or in native ClearType mode.                                      */
    +    FT_Int             backward_compatibility;
     
    -    /* Long side of a LCD subpixel is vertical (e.g., screen is rotated). */
    -    /* `_lean' to differentiate from the Infinality `vertical_lcd', which */
    -    /* is managed differently.                                            */
    -    FT_Bool            vertical_lcd_lean;
    +    FT_Render_Mode     mode;  /* target render mode */
     
    -    /* Default to backward compatibility mode in v40 interpreter.  If   */
    -    /* this is false, it implies the interpreter is in v35 or in native */
    -    /* ClearType mode.                                                  */
    -    FT_Bool            backward_compatibility;
    -
    -    /* Useful for detecting and denying post-IUP trickery that is usually */
    -    /* used to fix pixel patterns (`superhinting').                       */
    -    FT_Bool            iupx_called;
    -    FT_Bool            iupy_called;
    -
    -    /* ClearType hinting and grayscale rendering, as used by Universal */
    -    /* Windows Platform apps (Windows 8 and above).  Like the standard */
    -    /* colorful ClearType mode, it utilizes a vastly increased virtual */
    -    /* resolution on the x axis.  Different from bi-level hinting and  */
    -    /* grayscale rendering, the old mode from Win9x days that roughly  */
    -    /* adheres to the physical pixel grid on both axes.                */
    -    FT_Bool            grayscale_cleartype;
     #endif /* TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL */
     
         /* We maintain two counters (in addition to the instruction counter) */
    @@ -371,22 +401,15 @@ FT_BEGIN_HEADER
       extern const TT_GraphicsState  tt_default_graphics_state;
     
     
    -#ifdef TT_USE_BYTECODE_INTERPRETER
    -  FT_LOCAL( void )
    -  TT_Goto_CodeRange( TT_ExecContext  exec,
    -                     FT_Int          range,
    -                     FT_Long         IP );
    -
       FT_LOCAL( void )
       TT_Set_CodeRange( TT_ExecContext  exec,
                         FT_Int          range,
    -                    void*           base,
    +                    FT_Byte*        base,
                         FT_Long         length );
     
       FT_LOCAL( void )
       TT_Clear_CodeRange( TT_ExecContext  exec,
                           FT_Int          range );
    -#endif /* TT_USE_BYTECODE_INTERPRETER */
     
     
       /**************************************************************************
    @@ -413,22 +436,21 @@ FT_BEGIN_HEADER
       TT_New_Context( TT_Driver  driver );
     
     
    -#ifdef TT_USE_BYTECODE_INTERPRETER
       FT_LOCAL( void )
       TT_Done_Context( TT_ExecContext  exec );
     
    -  FT_LOCAL( FT_Error )
    +  FT_LOCAL( void )
       TT_Load_Context( TT_ExecContext  exec,
                        TT_Face         face,
                        TT_Size         size );
     
       FT_LOCAL( void )
       TT_Save_Context( TT_ExecContext  exec,
    -                   TT_Size         ins );
    +                   TT_Size         size );
     
       FT_LOCAL( FT_Error )
    -  TT_Run_Context( TT_ExecContext  exec );
    -#endif /* TT_USE_BYTECODE_INTERPRETER */
    +  TT_Run_Context( TT_ExecContext  exec,
    +                  TT_Size         size );
     
     
       /**************************************************************************
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.c
    index d0ac3181204..2aedbd842c1 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.c
    @@ -4,7 +4,7 @@
      *
      *   Objects manager (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -67,23 +67,13 @@
        *     A pointer to the target glyph zone.
        */
       FT_LOCAL_DEF( void )
    -  tt_glyphzone_done( TT_GlyphZone  zone )
    +  tt_glyphzone_done( FT_Memory     memory,
    +                     TT_GlyphZone  zone )
       {
    -    FT_Memory  memory = zone->memory;
    +    FT_FREE( zone->org );
     
    -
    -    if ( memory )
    -    {
    -      FT_FREE( zone->contours );
    -      FT_FREE( zone->tags );
    -      FT_FREE( zone->cur );
    -      FT_FREE( zone->org );
    -      FT_FREE( zone->orus );
    -
    -      zone->max_points   = zone->n_points   = 0;
    -      zone->max_contours = zone->n_contours = 0;
    -      zone->memory       = NULL;
    -    }
    +    zone->n_points   = 0;
    +    zone->n_contours = 0;
       }
     
     
    @@ -119,23 +109,22 @@
                         TT_GlyphZone  zone )
       {
         FT_Error  error;
    +    FT_Long   size = 3 * maxPoints * sizeof ( FT_Vector ) +
    +                       maxContours * sizeof ( FT_UShort ) +
    +                         maxPoints * sizeof ( FT_Byte );
     
     
    -    FT_ZERO( zone );
    -    zone->memory = memory;
    -
    -    if ( FT_NEW_ARRAY( zone->org,      maxPoints   ) ||
    -         FT_NEW_ARRAY( zone->cur,      maxPoints   ) ||
    -         FT_NEW_ARRAY( zone->orus,     maxPoints   ) ||
    -         FT_NEW_ARRAY( zone->tags,     maxPoints   ) ||
    -         FT_NEW_ARRAY( zone->contours, maxContours ) )
    +    if ( !FT_ALLOC( zone->org, size ) )
         {
    -      tt_glyphzone_done( zone );
    -    }
    -    else
    -    {
    -      zone->max_points   = maxPoints;
    -      zone->max_contours = maxContours;
    +      zone->n_points   = maxPoints;
    +      zone->n_contours = maxContours;
    +
    +      zone->cur      =               zone->org      + maxPoints;
    +      zone->orus     =               zone->cur      + maxPoints;
    +      zone->contours = (FT_UShort*)( zone->orus     + maxPoints );
    +      zone->tags     =   (FT_Byte*)( zone->contours + maxContours );
    +
    +      zone->first_point = 0;
         }
     
         return error;
    @@ -488,8 +477,7 @@
         int        j, k;
     
     
    -    FT_MEM_SET( num_matched_ids, 0,
    -                sizeof ( int ) * TRICK_SFNT_IDS_NUM_FACES );
    +    FT_ARRAY_ZERO( num_matched_ids, TRICK_SFNT_IDS_NUM_FACES );
         has_cvt  = FALSE;
         has_fpgm = FALSE;
         has_prep = FALSE;
    @@ -787,7 +775,7 @@
           FT_UInt  instance_index = (FT_UInt)face_index >> 16;
     
     
    -      if ( FT_HAS_MULTIPLE_MASTERS( ttface ) )
    +      if ( instance_index && FT_HAS_MULTIPLE_MASTERS( ttface ) )
           {
             error = FT_Set_Named_Instance( ttface, instance_index );
             if ( error )
    @@ -885,59 +873,18 @@
        *   size ::
        *     A handle to the size object.
        *
    -   *   pedantic ::
    -   *     Set if bytecode execution should be pedantic.
    -   *
        * @Return:
        *   FreeType error code.  0 means success.
        */
       FT_LOCAL_DEF( FT_Error )
    -  tt_size_run_fpgm( TT_Size  size,
    -                    FT_Bool  pedantic )
    +  tt_size_run_fpgm( TT_Size  size )
       {
         TT_Face         face = (TT_Face)size->root.face;
    -    TT_ExecContext  exec;
    +    TT_ExecContext  exec = size->context;
         FT_Error        error;
     
     
    -    exec = size->context;
    -
    -    error = TT_Load_Context( exec, face, size );
    -    if ( error )
    -      return error;
    -
    -    exec->callTop = 0;
    -    exec->top     = 0;
    -
    -    exec->period    = 64;
    -    exec->phase     = 0;
    -    exec->threshold = 0;
    -
    -    exec->instruction_trap = FALSE;
    -    exec->F_dot_P          = 0x4000L;
    -
    -    exec->pedantic_hinting = pedantic;
    -
    -    {
    -      FT_Size_Metrics*  size_metrics = &exec->metrics;
    -      TT_Size_Metrics*  tt_metrics   = &exec->tt_metrics;
    -
    -
    -      size_metrics->x_ppem   = 0;
    -      size_metrics->y_ppem   = 0;
    -      size_metrics->x_scale  = 0;
    -      size_metrics->y_scale  = 0;
    -
    -      tt_metrics->ppem  = 0;
    -      tt_metrics->scale = 0;
    -      tt_metrics->ratio = 0x10000L;
    -    }
    -
    -    /* allow font program execution */
    -    TT_Set_CodeRange( exec,
    -                      tt_coderange_font,
    -                      face->font_program,
    -                      (FT_Long)face->font_program_size );
    +    TT_Load_Context( exec, face, size );
     
         /* disable CVT and glyph programs coderange */
         TT_Clear_CodeRange( exec, tt_coderange_cvt );
    @@ -945,15 +892,19 @@
     
         if ( face->font_program_size > 0 )
         {
    -      TT_Goto_CodeRange( exec, tt_coderange_font, 0 );
    +      /* allow font program execution */
    +      TT_Set_CodeRange( exec,
    +                        tt_coderange_font,
    +                        face->font_program,
    +                        (FT_Long)face->font_program_size );
    +
    +      exec->pts.n_points   = 0;
    +      exec->pts.n_contours = 0;
     
           FT_TRACE4(( "Executing `fpgm' table.\n" ));
    -      error = face->interpreter( exec );
    -#ifdef FT_DEBUG_LEVEL_TRACE
    -      if ( error )
    -        FT_TRACE4(( "  interpretation failed with error code 0x%x\n",
    -                    error ));
    -#endif
    +      error = TT_Run_Context( exec, size );
    +      FT_TRACE4(( error ? "  failed (error code 0x%x)\n" : "",
    +                  error ));
         }
         else
           error = FT_Err_Ok;
    @@ -979,212 +930,146 @@
        *   size ::
        *     A handle to the size object.
        *
    -   *   pedantic ::
    -   *     Set if bytecode execution should be pedantic.
    -   *
        * @Return:
        *   FreeType error code.  0 means success.
        */
       FT_LOCAL_DEF( FT_Error )
    -  tt_size_run_prep( TT_Size  size,
    -                    FT_Bool  pedantic )
    +  tt_size_run_prep( TT_Size  size )
       {
         TT_Face         face = (TT_Face)size->root.face;
    -    TT_ExecContext  exec;
    +    TT_ExecContext  exec = size->context;
         FT_Error        error;
         FT_UInt         i;
     
     
    +    /* set default GS, twilight points, and storage */
    +    /* before CV program can modify them.           */
    +    size->GS = tt_default_graphics_state;
    +
    +    /* all twilight points are originally zero */
    +    FT_ARRAY_ZERO( size->twilight.org, size->twilight.n_points );
    +    FT_ARRAY_ZERO( size->twilight.cur, size->twilight.n_points );
    +
    +    TT_Load_Context( exec, face, size );
    +
    +    /* clear storage area */
    +    FT_ARRAY_ZERO( exec->storage, exec->storeSize );
    +
         /* Scale the cvt values to the new ppem.            */
         /* By default, we use the y ppem value for scaling. */
         FT_TRACE6(( "CVT values:\n" ));
    -    for ( i = 0; i < size->cvt_size; i++ )
    +    for ( i = 0; i < exec->cvtSize; i++ )
         {
           /* Unscaled CVT values are already stored in 26.6 format.            */
           /* Note that this scaling operation is very sensitive to rounding;   */
           /* the integer division by 64 must be applied to the first argument. */
    -      size->cvt[i] = FT_MulFix( face->cvt[i] / 64, size->ttmetrics.scale );
    -      FT_TRACE6(( "  %3d: %f (%f)\n",
    -                  i, (double)face->cvt[i] / 64, (double)size->cvt[i] / 64 ));
    +      exec->cvt[i] = FT_MulFix( face->cvt[i] / 64, size->ttmetrics.scale );
    +      FT_TRACE6(( "  %3u: %f (%f)\n",
    +                  i, (double)face->cvt[i] / 64, (double)exec->cvt[i] / 64 ));
         }
         FT_TRACE6(( "\n" ));
     
    -    exec = size->context;
    -
    -    error = TT_Load_Context( exec, face, size );
    -    if ( error )
    -      return error;
    -
    -    exec->callTop = 0;
    -    exec->top     = 0;
    -
    -    exec->instruction_trap = FALSE;
    -
    -    exec->pedantic_hinting = pedantic;
    -
    -    TT_Set_CodeRange( exec,
    -                      tt_coderange_cvt,
    -                      face->cvt_program,
    -                      (FT_Long)face->cvt_program_size );
    -
         TT_Clear_CodeRange( exec, tt_coderange_glyph );
     
         if ( face->cvt_program_size > 0 )
         {
    -      TT_Goto_CodeRange( exec, tt_coderange_cvt, 0 );
    +      /* allow CV program execution */
    +      TT_Set_CodeRange( exec,
    +                        tt_coderange_cvt,
    +                        face->cvt_program,
    +                        (FT_Long)face->cvt_program_size );
    +
    +      exec->pts.n_points   = 0;
    +      exec->pts.n_contours = 0;
     
           FT_TRACE4(( "Executing `prep' table.\n" ));
    -      error = face->interpreter( exec );
    -#ifdef FT_DEBUG_LEVEL_TRACE
    -      if ( error )
    -        FT_TRACE4(( "  interpretation failed with error code 0x%x\n",
    -                    error ));
    -#endif
    +      error = TT_Run_Context( exec, size );
    +      FT_TRACE4(( error ? "  failed (error code 0x%x)\n" : "",
    +                  error ));
         }
         else
           error = FT_Err_Ok;
     
         size->cvt_ready = error;
     
    -    /* UNDOCUMENTED!  The MS rasterizer doesn't allow the following */
    -    /* graphics state variables to be modified by the CVT program.  */
    -
    -    exec->GS.dualVector.x = 0x4000;
    -    exec->GS.dualVector.y = 0;
    -    exec->GS.projVector.x = 0x4000;
    -    exec->GS.projVector.y = 0x0;
    -    exec->GS.freeVector.x = 0x4000;
    -    exec->GS.freeVector.y = 0x0;
    -
    -    exec->GS.rp0 = 0;
    -    exec->GS.rp1 = 0;
    -    exec->GS.rp2 = 0;
    -
    -    exec->GS.gep0 = 1;
    -    exec->GS.gep1 = 1;
    -    exec->GS.gep2 = 1;
    -
    -    exec->GS.loop = 1;
    -
    -    /* save as default graphics state */
    -    size->GS = exec->GS;
    -
    -    TT_Save_Context( exec, size );
    +    if ( !error )
    +      TT_Save_Context( exec, size );
     
         return error;
       }
     
     
       static void
    -  tt_size_done_bytecode( FT_Size  ftsize )
    +  tt_size_done_bytecode( TT_Size  size )
       {
    -    TT_Size    size   = (TT_Size)ftsize;
    -    TT_Face    face   = (TT_Face)ftsize->face;
    -    FT_Memory  memory = face->root.memory;
    +    FT_Memory       memory = size->root.face->memory;
    +    TT_ExecContext  exec   = size->context;
     
    -    if ( size->context )
    +
    +    if ( exec )
         {
    -      TT_Done_Context( size->context );
    +      FT_FREE( exec->stack );
    +      FT_FREE( exec->FDefs );
    +
    +      TT_Done_Context( exec );
           size->context = NULL;
         }
     
    -    FT_FREE( size->cvt );
    -    size->cvt_size = 0;
    -
    -    /* free storage area */
    -    FT_FREE( size->storage );
    -    size->storage_size = 0;
    -
         /* twilight zone */
    -    tt_glyphzone_done( &size->twilight );
    -
    -    FT_FREE( size->function_defs );
    -    FT_FREE( size->instruction_defs );
    -
    -    size->num_function_defs    = 0;
    -    size->max_function_defs    = 0;
    -    size->num_instruction_defs = 0;
    -    size->max_instruction_defs = 0;
    -
    -    size->max_func = 0;
    -    size->max_ins  = 0;
    -
    -    size->bytecode_ready = -1;
    -    size->cvt_ready      = -1;
    +    tt_glyphzone_done( memory, &size->twilight );
       }
     
     
       /* Initialize bytecode-related fields in the size object.       */
       /* We do this only if bytecode interpretation is really needed. */
    -  static FT_Error
    -  tt_size_init_bytecode( FT_Size  ftsize,
    +  FT_LOCAL_DEF( FT_Error )
    +  tt_size_init_bytecode( TT_Size  size,
                              FT_Bool  pedantic )
       {
         FT_Error   error;
    -    TT_Size    size = (TT_Size)ftsize;
    -    TT_Face    face = (TT_Face)ftsize->face;
    -    FT_Memory  memory = face->root.memory;
    +    TT_Face    face = (TT_Face)size->root.face;
    +    FT_Memory  memory = size->root.face->memory;
     
         FT_UShort       n_twilight;
         TT_MaxProfile*  maxp = &face->max_profile;
    +    TT_ExecContext  exec;
     
     
    -    /* clean up bytecode related data */
    -    FT_FREE( size->function_defs );
    -    FT_FREE( size->instruction_defs );
    -    FT_FREE( size->cvt );
    -    FT_FREE( size->storage );
    +    exec = TT_New_Context( (TT_Driver)face->root.driver );
    +    if ( !exec )
    +      return FT_THROW( Could_Not_Find_Context );
     
    -    if ( size->context )
    -      TT_Done_Context( size->context );
    -    tt_glyphzone_done( &size->twilight );
    +    size->context = exec;
     
    -    size->bytecode_ready = -1;
    -    size->cvt_ready      = -1;
    +    exec->pedantic_hinting = pedantic;
     
    -    size->context = TT_New_Context( (TT_Driver)face->root.driver );
    +    exec->maxFDefs = maxp->maxFunctionDefs;
    +    exec->maxIDefs = maxp->maxInstructionDefs;
     
    -    size->max_function_defs    = maxp->maxFunctionDefs;
    -    size->max_instruction_defs = maxp->maxInstructionDefs;
    +    if ( FT_NEW_ARRAY( exec->FDefs, exec->maxFDefs + exec->maxIDefs ) )
    +      goto Fail;
     
    -    size->num_function_defs    = 0;
    -    size->num_instruction_defs = 0;
    +    exec->IDefs = exec->FDefs + exec->maxFDefs;
     
    -    size->max_func = 0;
    -    size->max_ins  = 0;
    +    exec->numFDefs = 0;
    +    exec->numIDefs = 0;
     
    -    size->cvt_size     = face->cvt_size;
    -    size->storage_size = maxp->maxStorage;
    +    exec->maxFunc = 0;
    +    exec->maxIns  = 0;
     
    -    /* Set default metrics */
    -    {
    -      TT_Size_Metrics*  tt_metrics = &size->ttmetrics;
    +    /* XXX: We reserve a little more elements on the stack to deal */
    +    /*      with broken fonts like arialbs, courbs, timesbs, etc.  */
    +    exec->stackSize = maxp->maxStackElements + 32;
    +    exec->storeSize = maxp->maxStorage;
    +    exec->cvtSize   = face->cvt_size;
     
    +    if ( FT_NEW_ARRAY( exec->stack,
    +                       exec->stackSize +
    +                         (FT_Long)( exec->storeSize + exec->cvtSize ) ) )
    +      goto Fail;
     
    -      tt_metrics->rotated   = FALSE;
    -      tt_metrics->stretched = FALSE;
    -
    -      /* Set default engine compensation.  Value 3 is not described */
    -      /* in the OpenType specification (as of Mai 2019), but Greg   */
    -      /* says that MS handles it the same as `gray'.                */
    -      /*                                                            */
    -      /* The Apple specification says that the compensation for     */
    -      /* `gray' is always zero.  FreeType doesn't do any            */
    -      /* compensation at all.                                       */
    -      tt_metrics->compensations[0] = 0;   /* gray  */
    -      tt_metrics->compensations[1] = 0;   /* black */
    -      tt_metrics->compensations[2] = 0;   /* white */
    -      tt_metrics->compensations[3] = 0;   /* zero  */
    -    }
    -
    -    /* allocate function defs, instruction defs, cvt, and storage area */
    -    if ( FT_NEW_ARRAY( size->function_defs,    size->max_function_defs    ) ||
    -         FT_NEW_ARRAY( size->instruction_defs, size->max_instruction_defs ) ||
    -         FT_NEW_ARRAY( size->cvt,              size->cvt_size             ) ||
    -         FT_NEW_ARRAY( size->storage,          size->storage_size         ) )
    -      goto Exit;
    -
    -    /* reserve twilight zone */
    +    /* reserve twilight zone and set GS before fpgm is executed, */
    +    /* just in case, even though fpgm should not touch them      */
         n_twilight = maxp->maxTwilightPoints;
     
         /* there are 4 phantom points (do we need this?) */
    @@ -1192,22 +1077,13 @@
     
         error = tt_glyphzone_new( memory, n_twilight, 0, &size->twilight );
         if ( error )
    -      goto Exit;
    +      goto Fail;
     
    -    size->twilight.n_points = n_twilight;
    +    size->GS        = tt_default_graphics_state;
    +    size->cvt_ready = -1;
     
    -    size->GS = tt_default_graphics_state;
    -
    -    /* set `face->interpreter' according to the debug hook present */
    -    {
    -      FT_Library  library = face->root.driver->root.library;
    -
    -
    -      face->interpreter = (TT_Interpreter)
    -                            library->debug_hooks[FT_DEBUG_HOOK_TRUETYPE];
    -      if ( !face->interpreter )
    -        face->interpreter = (TT_Interpreter)TT_RunIns;
    -    }
    +    size->ttmetrics.rotated   = FALSE;
    +    size->ttmetrics.stretched = FALSE;
     
         /* Fine, now run the font program! */
     
    @@ -1217,59 +1093,11 @@
         /* to be executed just once; calling it again is completely useless   */
         /* and might even lead to extremely slow behaviour if it is malformed */
         /* (containing an infinite loop, for example).                        */
    -    error = tt_size_run_fpgm( size, pedantic );
    +    error = tt_size_run_fpgm( size );
         return error;
     
    -  Exit:
    -    if ( error )
    -      tt_size_done_bytecode( ftsize );
    -
    -    return error;
    -  }
    -
    -
    -  FT_LOCAL_DEF( FT_Error )
    -  tt_size_ready_bytecode( TT_Size  size,
    -                          FT_Bool  pedantic )
    -  {
    -    FT_Error  error = FT_Err_Ok;
    -
    -
    -    if ( size->bytecode_ready < 0 )
    -      error = tt_size_init_bytecode( (FT_Size)size, pedantic );
    -    else
    -      error = size->bytecode_ready;
    -
    -    if ( error )
    -      goto Exit;
    -
    -    /* rescale CVT when needed */
    -    if ( size->cvt_ready < 0 )
    -    {
    -      FT_UShort  i;
    -
    -
    -      /* all twilight points are originally zero */
    -      for ( i = 0; i < size->twilight.n_points; i++ )
    -      {
    -        size->twilight.org[i].x = 0;
    -        size->twilight.org[i].y = 0;
    -        size->twilight.cur[i].x = 0;
    -        size->twilight.cur[i].y = 0;
    -      }
    -
    -      /* clear storage area */
    -      for ( i = 0; i < size->storage_size; i++ )
    -        size->storage[i] = 0;
    -
    -      size->GS = tt_default_graphics_state;
    -
    -      error = tt_size_run_prep( size, pedantic );
    -    }
    -    else
    -      error = size->cvt_ready;
    -
    -  Exit:
    +  Fail:
    +    tt_size_done_bytecode( size );
         return error;
       }
     
    @@ -1300,11 +1128,9 @@
     
     #ifdef TT_USE_BYTECODE_INTERPRETER
         size->bytecode_ready = -1;
    -    size->cvt_ready      = -1;
     #endif
     
    -    size->ttmetrics.valid = FALSE;
    -    size->strike_index    = 0xFFFFFFFFUL;
    +    size->strike_index = 0xFFFFFFFFUL;
     
         return error;
       }
    @@ -1325,14 +1151,11 @@
       FT_LOCAL_DEF( void )
       tt_size_done( FT_Size  ttsize )           /* TT_Size */
       {
    -    TT_Size  size = (TT_Size)ttsize;
    -
    -
     #ifdef TT_USE_BYTECODE_INTERPRETER
    -    tt_size_done_bytecode( ttsize );
    +    tt_size_done_bytecode( (TT_Size)ttsize );
    +#else
    +    FT_UNUSED( ttsize );
     #endif
    -
    -    size->ttmetrics.valid = FALSE;
       }
     
     
    @@ -1353,21 +1176,13 @@
        *     function must take `FT_Size` as a result. The passed `FT_Size` is
        *     expected to point to a `TT_Size`.
        */
    -  FT_LOCAL_DEF( FT_Error )
    +  FT_LOCAL_DEF( void )
       tt_size_reset_height( FT_Size  ft_size )
       {
         TT_Size           size         = (TT_Size)ft_size;
    -    TT_Face           face         = (TT_Face)size->root.face;
    +    TT_Face           face         = (TT_Face)ft_size->face;
         FT_Size_Metrics*  size_metrics = &size->hinted_metrics;
     
    -    size->ttmetrics.valid = FALSE;
    -
    -    /* copy the result from base layer */
    -    *size_metrics = size->root.metrics;
    -
    -    if ( size_metrics->x_ppem < 1 || size_metrics->y_ppem < 1 )
    -      return FT_THROW( Invalid_PPem );
    -
         /* This bit flag, if set, indicates that the ppems must be       */
         /* rounded to integers.  Nearly all TrueType fonts have this bit */
         /* set, as hinting won't work really well otherwise.             */
    @@ -1385,10 +1200,6 @@
                                    FT_MulFix( face->root.height,
                                               size_metrics->y_scale ) );
         }
    -
    -    size->ttmetrics.valid = TRUE;
    -
    -    return FT_Err_Ok;
       }
     
     
    @@ -1408,14 +1219,20 @@
       FT_LOCAL_DEF( FT_Error )
       tt_size_reset( TT_Size  size )
       {
    -    FT_Error          error;
         TT_Face           face         = (TT_Face)size->root.face;
         FT_Size_Metrics*  size_metrics = &size->hinted_metrics;
     
     
    -    error = tt_size_reset_height( (FT_Size)size );
    -    if ( error )
    -      return error;
    +    /* invalidate the size object first */
    +    size->ttmetrics.ppem = 0;
    +
    +    if ( size->root.metrics.x_ppem == 0 || size->root.metrics.y_ppem == 0 )
    +      return FT_THROW( Invalid_PPem );
    +
    +    /* copy the result from base layer */
    +    *size_metrics = size->root.metrics;
    +
    +    tt_size_reset_height( (FT_Size)size );
     
         if ( face->header.Flags & 8 )
         {
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.h
    index 9c36ca78362..28d6c7d855f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttobjs.h
    @@ -4,7 +4,7 @@
      *
      *   Objects manager (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -53,6 +53,8 @@ FT_BEGIN_HEADER
       typedef FT_GlyphSlot  TT_GlyphSlot;
     
     
    +#ifdef TT_USE_BYTECODE_INTERPRETER
    +
       /**************************************************************************
        *
        * @Struct:
    @@ -67,21 +69,27 @@ FT_BEGIN_HEADER
         FT_UShort      rp1;
         FT_UShort      rp2;
     
    +    FT_UShort      gep0;
    +    FT_UShort      gep1;
    +    FT_UShort      gep2;
    +
         FT_UnitVector  dualVector;
         FT_UnitVector  projVector;
         FT_UnitVector  freeVector;
     
         FT_Long        loop;
    -    FT_F26Dot6     minimum_distance;
         FT_Int         round_state;
    +    FT_F26Dot6     compensation[4];   /* device-specific compensations  */
     
    -    FT_Bool        auto_flip;
    +    /* default values below can be modified by 'fpgm' and 'prep' */
    +    FT_F26Dot6     minimum_distance;
         FT_F26Dot6     control_value_cutin;
         FT_F26Dot6     single_width_cutin;
         FT_F26Dot6     single_width_value;
         FT_UShort      delta_base;
         FT_UShort      delta_shift;
     
    +    FT_Bool        auto_flip;
         FT_Byte        instruct_control;
         /* According to Greg Hitchcock from Microsoft, the `scan_control'     */
         /* variable as documented in the TrueType specification is a 32-bit   */
    @@ -90,17 +98,12 @@ FT_BEGIN_HEADER
         FT_Bool        scan_control;
         FT_Int         scan_type;
     
    -    FT_UShort      gep0;
    -    FT_UShort      gep1;
    -    FT_UShort      gep2;
    -
       } TT_GraphicsState;
     
     
    -#ifdef TT_USE_BYTECODE_INTERPRETER
    -
       FT_LOCAL( void )
    -  tt_glyphzone_done( TT_GlyphZone  zone );
    +  tt_glyphzone_done( FT_Memory     memory,
    +                     TT_GlyphZone  zone );
     
       FT_LOCAL( FT_Error )
       tt_glyphzone_new( FT_Memory     memory,
    @@ -112,73 +115,6 @@ FT_BEGIN_HEADER
     
     
     
    -  /**************************************************************************
    -   *
    -   * EXECUTION SUBTABLES
    -   *
    -   * These sub-tables relate to instruction execution.
    -   *
    -   */
    -
    -
    -#define TT_MAX_CODE_RANGES  3
    -
    -
    -  /**************************************************************************
    -   *
    -   * There can only be 3 active code ranges at once:
    -   *   - the Font Program
    -   *   - the CVT Program
    -   *   - a glyph's instructions set
    -   */
    -  typedef enum  TT_CodeRange_Tag_
    -  {
    -    tt_coderange_none = 0,
    -    tt_coderange_font,
    -    tt_coderange_cvt,
    -    tt_coderange_glyph
    -
    -  } TT_CodeRange_Tag;
    -
    -
    -  typedef struct  TT_CodeRange_
    -  {
    -    FT_Byte*  base;
    -    FT_Long   size;
    -
    -  } TT_CodeRange;
    -
    -  typedef TT_CodeRange  TT_CodeRangeTable[TT_MAX_CODE_RANGES];
    -
    -
    -  /**************************************************************************
    -   *
    -   * Defines a function/instruction definition record.
    -   */
    -  typedef struct  TT_DefRecord_
    -  {
    -    FT_Int    range;          /* in which code range is it located?     */
    -    FT_Long   start;          /* where does it start?                   */
    -    FT_Long   end;            /* where does it end?                     */
    -    FT_UInt   opc;            /* function #, or instruction code        */
    -    FT_Bool   active;         /* is it active?                          */
    -
    -  } TT_DefRecord, *TT_DefArray;
    -
    -
    -  /**************************************************************************
    -   *
    -   * Subglyph transformation record.
    -   */
    -  typedef struct  TT_Transform_
    -  {
    -    FT_Fixed    xx, xy;     /* transformation matrix coefficients */
    -    FT_Fixed    yx, yy;
    -    FT_F26Dot6  ox, oy;     /* offsets                            */
    -
    -  } TT_Transform;
    -
    -
       /**************************************************************************
        *
        * A note regarding non-squared pixels:
    @@ -251,13 +187,9 @@ FT_BEGIN_HEADER
         FT_Long     x_ratio;
         FT_Long     y_ratio;
     
    -    FT_UShort   ppem;               /* maximum ppem size              */
         FT_Long     ratio;              /* current ratio                  */
         FT_Fixed    scale;
    -
    -    FT_F26Dot6  compensations[4];   /* device-specific compensations  */
    -
    -    FT_Bool     valid;
    +    FT_UShort   ppem;               /* maximum ppem size              */
     
         FT_Bool     rotated;            /* `is the glyph rotated?'-flag   */
         FT_Bool     stretched;          /* `is the glyph stretched?'-flag */
    @@ -288,27 +220,8 @@ FT_BEGIN_HEADER
     
         FT_Long            point_size;    /* for the `MPS' bytecode instruction */
     
    -    FT_UInt            num_function_defs; /* number of function definitions */
    -    FT_UInt            max_function_defs;
    -    TT_DefArray        function_defs;     /* table of function definitions  */
    -
    -    FT_UInt            num_instruction_defs;  /* number of ins. definitions */
    -    FT_UInt            max_instruction_defs;
    -    TT_DefArray        instruction_defs;      /* table of ins. definitions  */
    -
    -    FT_UInt            max_func;
    -    FT_UInt            max_ins;
    -
    -    TT_CodeRangeTable  codeRangeTable;
    -
         TT_GraphicsState   GS;
     
    -    FT_ULong           cvt_size;      /* the scaled control value table */
    -    FT_Long*           cvt;
    -
    -    FT_UShort          storage_size; /* The storage area is now part of */
    -    FT_Long*           storage;      /* the instance                    */
    -
         TT_GlyphZoneRec    twilight;     /* The instance's twilight zone    */
     
         TT_ExecContext     context;
    @@ -375,20 +288,18 @@ FT_BEGIN_HEADER
     #ifdef TT_USE_BYTECODE_INTERPRETER
     
       FT_LOCAL( FT_Error )
    -  tt_size_run_fpgm( TT_Size  size,
    -                    FT_Bool  pedantic );
    +  tt_size_run_fpgm( TT_Size  size );
     
       FT_LOCAL( FT_Error )
    -  tt_size_run_prep( TT_Size  size,
    -                    FT_Bool  pedantic );
    +  tt_size_run_prep( TT_Size  size );
     
       FT_LOCAL( FT_Error )
    -  tt_size_ready_bytecode( TT_Size  size,
    -                          FT_Bool  pedantic );
    +  tt_size_init_bytecode( TT_Size  size,
    +                         FT_Bool  pedantic );
     
     #endif /* TT_USE_BYTECODE_INTERPRETER */
     
    -  FT_LOCAL( FT_Error )
    +  FT_LOCAL( void )
       tt_size_reset_height( FT_Size  size );
     
       FT_LOCAL( FT_Error )
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.c
    index 9505b5f179f..827454d8574 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.c
    @@ -4,7 +4,7 @@
      *
      *   TrueType-specific tables loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -110,7 +110,7 @@
     
         if ( face->num_locations != (FT_ULong)face->root.num_glyphs + 1 )
         {
    -      FT_TRACE2(( "glyph count mismatch!  loca: %ld, maxp: %ld\n",
    +      FT_TRACE2(( "glyph count mismatch!  loca: %lu, maxp: %ld\n",
                       face->num_locations - 1, face->root.num_glyphs ));
     
           /* we only handle the case where `maxp' gives a larger value */
    @@ -151,7 +151,7 @@
               face->num_locations = (FT_ULong)face->root.num_glyphs + 1;
               table_len           = new_loca_len;
     
    -          FT_TRACE2(( "adjusting num_locations to %ld\n",
    +          FT_TRACE2(( "adjusting num_locations to %lu\n",
                           face->num_locations ));
             }
             else
    @@ -225,7 +225,7 @@
         if ( pos1 > ttface->glyf_len )
         {
           FT_TRACE1(( "tt_face_get_location:"
    -                  " too large offset (0x%08lx) found for glyph index %d,\n",
    +                  " too large offset (0x%08lx) found for glyph index %u,\n",
                       pos1, gindex ));
           FT_TRACE1(( "                     "
                       " exceeding the end of `glyf' table (0x%08lx)\n",
    @@ -240,17 +240,17 @@
           if ( gindex == ttface->num_locations - 2 )
           {
             FT_TRACE1(( "tt_face_get_location:"
    -                    " too large size (%ld bytes) found for glyph index %d,\n",
    +                    " too large size (%lu bytes) found for glyph index %u,\n",
                         pos2 - pos1, gindex ));
             FT_TRACE1(( "                     "
    -                    " truncating at the end of `glyf' table to %ld bytes\n",
    +                    " truncating at the end of `glyf' table to %lu bytes\n",
                         ttface->glyf_len - pos1 ));
             pos2 = ttface->glyf_len;
           }
           else
           {
             FT_TRACE1(( "tt_face_get_location:"
    -                    " too large offset (0x%08lx) found for glyph index %d,\n",
    +                    " too large offset (0x%08lx) found for glyph index %u,\n",
                         pos2, gindex + 1 ));
             FT_TRACE1(( "                     "
                         " exceeding the end of `glyf' table (0x%08lx)\n",
    @@ -419,7 +419,7 @@
           if ( FT_FRAME_EXTRACT( table_len, face->font_program ) )
             goto Exit;
     
    -      FT_TRACE2(( "loaded, %12ld bytes\n", face->font_program_size ));
    +      FT_TRACE2(( "loaded, %12lu bytes\n", face->font_program_size ));
         }
     
       Exit:
    @@ -482,7 +482,7 @@
           if ( FT_FRAME_EXTRACT( table_len, face->cvt_program ) )
             goto Exit;
     
    -      FT_TRACE2(( "loaded, %12ld bytes\n", face->cvt_program_size ));
    +      FT_TRACE2(( "loaded, %12lu bytes\n", face->cvt_program_size ));
         }
     
       Exit:
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.h
    index bc32b58020c..bb4d3c9cc55 100644
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/truetype/ttpload.h
    @@ -4,7 +4,7 @@
      *
      *   TrueType-specific tables loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttsubpix.c b/src/java.desktop/share/native/libfreetype/src/truetype/ttsubpix.c
    deleted file mode 100644
    index d811beef0df..00000000000
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttsubpix.c
    +++ /dev/null
    @@ -1,1013 +0,0 @@
    -/****************************************************************************
    - *
    - * ttsubpix.c
    - *
    - *   TrueType Subpixel Hinting.
    - *
    - * Copyright (C) 2010-2023 by
    - * David Turner, Robert Wilhelm, and Werner Lemberg.
    - *
    - * This file is part of the FreeType project, and may only be used,
    - * modified, and distributed under the terms of the FreeType project
    - * license, LICENSE.TXT.  By continuing to use, modify, or distribute
    - * this file you indicate that you have read the license and
    - * understand and accept it fully.
    - *
    - */
    -
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -
    -#include "ttsubpix.h"
    -
    -
    -#if defined( TT_USE_BYTECODE_INTERPRETER )            && \
    -    defined( TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY )
    -
    -  /**************************************************************************
    -   *
    -   * These rules affect how the TT Interpreter does hinting, with the
    -   * goal of doing subpixel hinting by (in general) ignoring x moves.
    -   * Some of these rules are fixes that go above and beyond the
    -   * stated techniques in the MS whitepaper on Cleartype, due to
    -   * artifacts in many glyphs.  So, these rules make some glyphs render
    -   * better than they do in the MS rasterizer.
    -   *
    -   * "" string or 0 int/char indicates to apply to all glyphs.
    -   * "-" used as dummy placeholders, but any non-matching string works.
    -   *
    -   * Some of this could arguably be implemented in fontconfig, however:
    -   *
    -   * - Fontconfig can't set things on a glyph-by-glyph basis.
    -   * - The tweaks that happen here are very low-level, from an average
    -   *   user's point of view and are best implemented in the hinter.
    -   *
    -   * The goal is to make the subpixel hinting techniques as generalized
    -   * as possible across all fonts to prevent the need for extra rules such
    -   * as these.
    -   *
    -   * The rule structure is designed so that entirely new rules can easily
    -   * be added when a new compatibility feature is discovered.
    -   *
    -   * The rule structures could also use some enhancement to handle ranges.
    -   *
    -   *     ****************** WORK IN PROGRESS *******************
    -   */
    -
    -  /* These are `classes' of fonts that can be grouped together and used in */
    -  /* rules below.  A blank entry "" is required at the end of these!       */
    -#define FAMILY_CLASS_RULES_SIZE  7
    -
    -  static const SPH_Font_Class  FAMILY_CLASS_Rules
    -                               [FAMILY_CLASS_RULES_SIZE] =
    -  {
    -    { "MS Legacy Fonts",
    -      { "Aharoni",
    -        "Andale Mono",
    -        "Andalus",
    -        "Angsana New",
    -        "AngsanaUPC",
    -        "Arabic Transparent",
    -        "Arial Black",
    -        "Arial Narrow",
    -        "Arial Unicode MS",
    -        "Arial",
    -        "Batang",
    -        "Browallia New",
    -        "BrowalliaUPC",
    -        "Comic Sans MS",
    -        "Cordia New",
    -        "CordiaUPC",
    -        "Courier New",
    -        "DFKai-SB",
    -        "David Transparent",
    -        "David",
    -        "DilleniaUPC",
    -        "Estrangelo Edessa",
    -        "EucrosiaUPC",
    -        "FangSong_GB2312",
    -        "Fixed Miriam Transparent",
    -        "FrankRuehl",
    -        "Franklin Gothic Medium",
    -        "FreesiaUPC",
    -        "Garamond",
    -        "Gautami",
    -        "Georgia",
    -        "Gulim",
    -        "Impact",
    -        "IrisUPC",
    -        "JasmineUPC",
    -        "KaiTi_GB2312",
    -        "KodchiangUPC",
    -        "Latha",
    -        "Levenim MT",
    -        "LilyUPC",
    -        "Lucida Console",
    -        "Lucida Sans Unicode",
    -        "MS Gothic",
    -        "MS Mincho",
    -        "MV Boli",
    -        "Mangal",
    -        "Marlett",
    -        "Microsoft Sans Serif",
    -        "Mingliu",
    -        "Miriam Fixed",
    -        "Miriam Transparent",
    -        "Miriam",
    -        "Narkisim",
    -        "Palatino Linotype",
    -        "Raavi",
    -        "Rod Transparent",
    -        "Rod",
    -        "Shruti",
    -        "SimHei",
    -        "Simplified Arabic Fixed",
    -        "Simplified Arabic",
    -        "Simsun",
    -        "Sylfaen",
    -        "Symbol",
    -        "Tahoma",
    -        "Times New Roman",
    -        "Traditional Arabic",
    -        "Trebuchet MS",
    -        "Tunga",
    -        "Verdana",
    -        "Webdings",
    -        "Wingdings",
    -        "",
    -      },
    -    },
    -    { "Core MS Legacy Fonts",
    -      { "Arial Black",
    -        "Arial Narrow",
    -        "Arial Unicode MS",
    -        "Arial",
    -        "Comic Sans MS",
    -        "Courier New",
    -        "Garamond",
    -        "Georgia",
    -        "Impact",
    -        "Lucida Console",
    -        "Lucida Sans Unicode",
    -        "Microsoft Sans Serif",
    -        "Palatino Linotype",
    -        "Tahoma",
    -        "Times New Roman",
    -        "Trebuchet MS",
    -        "Verdana",
    -        "",
    -      },
    -    },
    -    { "Apple Legacy Fonts",
    -      { "Geneva",
    -        "Times",
    -        "Monaco",
    -        "Century",
    -        "Chalkboard",
    -        "Lobster",
    -        "Century Gothic",
    -        "Optima",
    -        "Lucida Grande",
    -        "Gill Sans",
    -        "Baskerville",
    -        "Helvetica",
    -        "Helvetica Neue",
    -        "",
    -      },
    -    },
    -    { "Legacy Sans Fonts",
    -      { "Andale Mono",
    -        "Arial Unicode MS",
    -        "Arial",
    -        "Century Gothic",
    -        "Comic Sans MS",
    -        "Franklin Gothic Medium",
    -        "Geneva",
    -        "Lucida Console",
    -        "Lucida Grande",
    -        "Lucida Sans Unicode",
    -        "Lucida Sans Typewriter",
    -        "Microsoft Sans Serif",
    -        "Monaco",
    -        "Tahoma",
    -        "Trebuchet MS",
    -        "Verdana",
    -        "",
    -      },
    -    },
    -
    -    { "Misc Legacy Fonts",
    -      { "Dark Courier", "", }, },
    -    { "Verdana Clones",
    -      { "DejaVu Sans",
    -        "Bitstream Vera Sans", "", }, },
    -    { "Verdana and Clones",
    -      { "DejaVu Sans",
    -        "Bitstream Vera Sans",
    -        "Verdana", "", }, },
    -  };
    -
    -
    -  /* Define this to force natural (i.e. not bitmap-compatible) widths.     */
    -  /* The default leans strongly towards natural widths except for a few    */
    -  /* legacy fonts where a selective combination produces nicer results.    */
    -/* #define FORCE_NATURAL_WIDTHS   */
    -
    -
    -  /* Define `classes' of styles that can be grouped together and used in   */
    -  /* rules below.  A blank entry "" is required at the end of these!       */
    -#define STYLE_CLASS_RULES_SIZE  5
    -
    -  static const SPH_Font_Class  STYLE_CLASS_Rules
    -                               [STYLE_CLASS_RULES_SIZE] =
    -  {
    -    { "Regular Class",
    -      { "Regular",
    -        "Book",
    -        "Medium",
    -        "Roman",
    -        "Normal",
    -        "",
    -      },
    -    },
    -    { "Regular/Italic Class",
    -      { "Regular",
    -        "Book",
    -        "Medium",
    -        "Italic",
    -        "Oblique",
    -        "Roman",
    -        "Normal",
    -        "",
    -      },
    -    },
    -    { "Bold/BoldItalic Class",
    -      { "Bold",
    -        "Bold Italic",
    -        "Black",
    -        "",
    -      },
    -    },
    -    { "Bold/Italic/BoldItalic Class",
    -      { "Bold",
    -        "Bold Italic",
    -        "Black",
    -        "Italic",
    -        "Oblique",
    -        "",
    -      },
    -    },
    -    { "Regular/Bold Class",
    -      { "Regular",
    -        "Book",
    -        "Medium",
    -        "Normal",
    -        "Roman",
    -        "Bold",
    -        "Black",
    -        "",
    -      },
    -    },
    -  };
    -
    -
    -  /* Force special legacy fixes for fonts.                                 */
    -#define COMPATIBILITY_MODE_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  COMPATIBILITY_MODE_Rules
    -                              [COMPATIBILITY_MODE_RULES_SIZE] =
    -  {
    -    { "Verdana Clones", 0, "", 0 },
    -  };
    -
    -
    -  /* Don't do subpixel (ignore_x_mode) hinting; do normal hinting.         */
    -#define PIXEL_HINTING_RULES_SIZE  2
    -
    -  static const SPH_TweakRule  PIXEL_HINTING_Rules
    -                              [PIXEL_HINTING_RULES_SIZE] =
    -  {
    -    /* these characters are almost always safe */
    -    { "Courier New", 12, "Italic", 'z' },
    -    { "Courier New", 11, "Italic", 'z' },
    -  };
    -
    -
    -  /* Subpixel hinting ignores SHPIX rules on X.  Force SHPIX for these.    */
    -#define DO_SHPIX_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  DO_SHPIX_Rules
    -                              [DO_SHPIX_RULES_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* Skip Y moves that start with a point that is not on a Y pixel         */
    -  /* boundary and don't move that point to a Y pixel boundary.             */
    -#define SKIP_NONPIXEL_Y_MOVES_RULES_SIZE  4
    -
    -  static const SPH_TweakRule  SKIP_NONPIXEL_Y_MOVES_Rules
    -                              [SKIP_NONPIXEL_Y_MOVES_RULES_SIZE] =
    -  {
    -    /* fix vwxyz thinness */
    -    { "Consolas", 0, "", 0 },
    -    /* Fix thin middle stems */
    -    { "Core MS Legacy Fonts", 0, "Regular", 0 },
    -    /* Cyrillic small letter I */
    -    { "Legacy Sans Fonts", 0, "", 0 },
    -    /* Fix artifacts with some Regular & Bold */
    -    { "Verdana Clones", 0, "", 0 },
    -  };
    -
    -
    -#define SKIP_NONPIXEL_Y_MOVES_RULES_EXCEPTIONS_SIZE  1
    -
    -  static const SPH_TweakRule  SKIP_NONPIXEL_Y_MOVES_Rules_Exceptions
    -                              [SKIP_NONPIXEL_Y_MOVES_RULES_EXCEPTIONS_SIZE] =
    -  {
    -    /* Fixes < and > */
    -    { "Courier New", 0, "Regular", 0 },
    -  };
    -
    -
    -  /* Skip Y moves that start with a point that is not on a Y pixel         */
    -  /* boundary and don't move that point to a Y pixel boundary.             */
    -#define SKIP_NONPIXEL_Y_MOVES_DELTAP_RULES_SIZE  2
    -
    -  static const SPH_TweakRule  SKIP_NONPIXEL_Y_MOVES_DELTAP_Rules
    -                              [SKIP_NONPIXEL_Y_MOVES_DELTAP_RULES_SIZE] =
    -  {
    -    /* Maintain thickness of diagonal in 'N' */
    -    { "Times New Roman", 0, "Regular/Bold Class", 'N' },
    -    { "Georgia", 0, "Regular/Bold Class", 'N' },
    -  };
    -
    -
    -  /* Skip Y moves that move a point off a Y pixel boundary.                */
    -#define SKIP_OFFPIXEL_Y_MOVES_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  SKIP_OFFPIXEL_Y_MOVES_Rules
    -                              [SKIP_OFFPIXEL_Y_MOVES_RULES_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -#define SKIP_OFFPIXEL_Y_MOVES_RULES_EXCEPTIONS_SIZE  1
    -
    -  static const SPH_TweakRule  SKIP_OFFPIXEL_Y_MOVES_Rules_Exceptions
    -                              [SKIP_OFFPIXEL_Y_MOVES_RULES_EXCEPTIONS_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* Round moves that don't move a point to a Y pixel boundary.            */
    -#define ROUND_NONPIXEL_Y_MOVES_RULES_SIZE  2
    -
    -  static const SPH_TweakRule  ROUND_NONPIXEL_Y_MOVES_Rules
    -                              [ROUND_NONPIXEL_Y_MOVES_RULES_SIZE] =
    -  {
    -    /* Droid font instructions don't snap Y to pixels */
    -    { "Droid Sans", 0, "Regular/Italic Class", 0 },
    -    { "Droid Sans Mono", 0, "", 0 },
    -  };
    -
    -
    -#define ROUND_NONPIXEL_Y_MOVES_RULES_EXCEPTIONS_SIZE  1
    -
    -  static const SPH_TweakRule  ROUND_NONPIXEL_Y_MOVES_Rules_Exceptions
    -                              [ROUND_NONPIXEL_Y_MOVES_RULES_EXCEPTIONS_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* Allow a Direct_Move along X freedom vector if matched.                */
    -#define ALLOW_X_DMOVE_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  ALLOW_X_DMOVE_Rules
    -                              [ALLOW_X_DMOVE_RULES_SIZE] =
    -  {
    -    /* Fixes vanishing diagonal in 4 */
    -    { "Verdana", 0, "Regular", '4' },
    -  };
    -
    -
    -  /* Return MS rasterizer version 35 if matched.                           */
    -#define RASTERIZER_35_RULES_SIZE  8
    -
    -  static const SPH_TweakRule  RASTERIZER_35_Rules
    -                              [RASTERIZER_35_RULES_SIZE] =
    -  {
    -    /* This seems to be the only way to make these look good */
    -    { "Times New Roman", 0, "Regular", 'i' },
    -    { "Times New Roman", 0, "Regular", 'j' },
    -    { "Times New Roman", 0, "Regular", 'm' },
    -    { "Times New Roman", 0, "Regular", 'r' },
    -    { "Times New Roman", 0, "Regular", 'a' },
    -    { "Times New Roman", 0, "Regular", 'n' },
    -    { "Times New Roman", 0, "Regular", 'p' },
    -    { "Times", 0, "", 0 },
    -  };
    -
    -
    -  /* Don't round to the subpixel grid.  Round to pixel grid.               */
    -#define NORMAL_ROUND_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  NORMAL_ROUND_Rules
    -                              [NORMAL_ROUND_RULES_SIZE] =
    -  {
    -    /* Fix serif thickness for certain ppems */
    -    /* Can probably be generalized somehow   */
    -    { "Courier New", 0, "", 0 },
    -  };
    -
    -
    -  /* Skip IUP instructions if matched.                                     */
    -#define SKIP_IUP_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  SKIP_IUP_Rules
    -                              [SKIP_IUP_RULES_SIZE] =
    -  {
    -    { "Arial", 13, "Regular", 'a' },
    -  };
    -
    -
    -  /* Skip MIAP Twilight hack if matched.                                   */
    -#define MIAP_HACK_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  MIAP_HACK_Rules
    -                              [MIAP_HACK_RULES_SIZE] =
    -  {
    -    { "Geneva", 12, "", 0 },
    -  };
    -
    -
    -  /* Skip DELTAP instructions if matched.                                  */
    -#define ALWAYS_SKIP_DELTAP_RULES_SIZE  23
    -
    -  static const SPH_TweakRule  ALWAYS_SKIP_DELTAP_Rules
    -                              [ALWAYS_SKIP_DELTAP_RULES_SIZE] =
    -  {
    -    { "Georgia", 0, "Regular", 'k' },
    -    /* fix various problems with e in different versions */
    -    { "Trebuchet MS", 14, "Regular", 'e' },
    -    { "Trebuchet MS", 13, "Regular", 'e' },
    -    { "Trebuchet MS", 15, "Regular", 'e' },
    -    { "Trebuchet MS", 0, "Italic", 'v' },
    -    { "Trebuchet MS", 0, "Italic", 'w' },
    -    { "Trebuchet MS", 0, "Regular", 'Y' },
    -    { "Arial", 11, "Regular", 's' },
    -    /* prevent problems with '3' and others */
    -    { "Verdana", 10, "Regular", 0 },
    -    { "Verdana", 9, "Regular", 0 },
    -    /* Cyrillic small letter short I */
    -    { "Legacy Sans Fonts", 0, "", 0x438 },
    -    { "Legacy Sans Fonts", 0, "", 0x439 },
    -    { "Arial", 10, "Regular", '6' },
    -    { "Arial", 0, "Bold/BoldItalic Class", 'a' },
    -    /* Make horizontal stems consistent with the rest */
    -    { "Arial", 24, "Bold", 'a' },
    -    { "Arial", 25, "Bold", 'a' },
    -    { "Arial", 24, "Bold", 's' },
    -    { "Arial", 25, "Bold", 's' },
    -    { "Arial", 34, "Bold", 's' },
    -    { "Arial", 35, "Bold", 's' },
    -    { "Arial", 36, "Bold", 's' },
    -    { "Arial", 25, "Regular", 's' },
    -    { "Arial", 26, "Regular", 's' },
    -  };
    -
    -
    -  /* Always do DELTAP instructions if matched.                             */
    -#define ALWAYS_DO_DELTAP_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  ALWAYS_DO_DELTAP_Rules
    -                              [ALWAYS_DO_DELTAP_RULES_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* Don't allow ALIGNRP after IUP.                                        */
    -#define NO_ALIGNRP_AFTER_IUP_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  NO_ALIGNRP_AFTER_IUP_Rules
    -                              [NO_ALIGNRP_AFTER_IUP_RULES_SIZE] =
    -  {
    -    /* Prevent creation of dents in outline */
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* Don't allow DELTAP after IUP.                                         */
    -#define NO_DELTAP_AFTER_IUP_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  NO_DELTAP_AFTER_IUP_Rules
    -                              [NO_DELTAP_AFTER_IUP_RULES_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* Don't allow CALL after IUP.                                           */
    -#define NO_CALL_AFTER_IUP_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  NO_CALL_AFTER_IUP_Rules
    -                              [NO_CALL_AFTER_IUP_RULES_SIZE] =
    -  {
    -    /* Prevent creation of dents in outline */
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -  /* De-embolden these glyphs slightly.                                    */
    -#define DEEMBOLDEN_RULES_SIZE  9
    -
    -  static const SPH_TweakRule  DEEMBOLDEN_Rules
    -                              [DEEMBOLDEN_RULES_SIZE] =
    -  {
    -    { "Courier New", 0, "Bold", 'A' },
    -    { "Courier New", 0, "Bold", 'W' },
    -    { "Courier New", 0, "Bold", 'w' },
    -    { "Courier New", 0, "Bold", 'M' },
    -    { "Courier New", 0, "Bold", 'X' },
    -    { "Courier New", 0, "Bold", 'K' },
    -    { "Courier New", 0, "Bold", 'x' },
    -    { "Courier New", 0, "Bold", 'z' },
    -    { "Courier New", 0, "Bold", 'v' },
    -  };
    -
    -
    -  /* Embolden these glyphs slightly.                                       */
    -#define EMBOLDEN_RULES_SIZE  2
    -
    -  static const SPH_TweakRule  EMBOLDEN_Rules
    -                              [EMBOLDEN_RULES_SIZE] =
    -  {
    -    { "Courier New", 0, "Regular", 0 },
    -    { "Courier New", 0, "Italic", 0 },
    -  };
    -
    -
    -  /* This is a CVT hack that makes thick horizontal stems on 2, 5, 7       */
    -  /* similar to Windows XP.                                                */
    -#define TIMES_NEW_ROMAN_HACK_RULES_SIZE  12
    -
    -  static const SPH_TweakRule  TIMES_NEW_ROMAN_HACK_Rules
    -                              [TIMES_NEW_ROMAN_HACK_RULES_SIZE] =
    -  {
    -    { "Times New Roman", 16, "Italic", '2' },
    -    { "Times New Roman", 16, "Italic", '5' },
    -    { "Times New Roman", 16, "Italic", '7' },
    -    { "Times New Roman", 16, "Regular", '2' },
    -    { "Times New Roman", 16, "Regular", '5' },
    -    { "Times New Roman", 16, "Regular", '7' },
    -    { "Times New Roman", 17, "Italic", '2' },
    -    { "Times New Roman", 17, "Italic", '5' },
    -    { "Times New Roman", 17, "Italic", '7' },
    -    { "Times New Roman", 17, "Regular", '2' },
    -    { "Times New Roman", 17, "Regular", '5' },
    -    { "Times New Roman", 17, "Regular", '7' },
    -  };
    -
    -
    -  /* This fudges distance on 2 to get rid of the vanishing stem issue.     */
    -  /* A real solution to this is certainly welcome.                         */
    -#define COURIER_NEW_2_HACK_RULES_SIZE  15
    -
    -  static const SPH_TweakRule  COURIER_NEW_2_HACK_Rules
    -                              [COURIER_NEW_2_HACK_RULES_SIZE] =
    -  {
    -    { "Courier New", 10, "Regular", '2' },
    -    { "Courier New", 11, "Regular", '2' },
    -    { "Courier New", 12, "Regular", '2' },
    -    { "Courier New", 13, "Regular", '2' },
    -    { "Courier New", 14, "Regular", '2' },
    -    { "Courier New", 15, "Regular", '2' },
    -    { "Courier New", 16, "Regular", '2' },
    -    { "Courier New", 17, "Regular", '2' },
    -    { "Courier New", 18, "Regular", '2' },
    -    { "Courier New", 19, "Regular", '2' },
    -    { "Courier New", 20, "Regular", '2' },
    -    { "Courier New", 21, "Regular", '2' },
    -    { "Courier New", 22, "Regular", '2' },
    -    { "Courier New", 23, "Regular", '2' },
    -    { "Courier New", 24, "Regular", '2' },
    -  };
    -
    -
    -#ifndef FORCE_NATURAL_WIDTHS
    -
    -  /* Use compatible widths with these glyphs.  Compatible widths is always */
    -  /* on when doing B/W TrueType instructing, but is used selectively here, */
    -  /* typically on glyphs with 3 or more vertical stems.                    */
    -#define COMPATIBLE_WIDTHS_RULES_SIZE  38
    -
    -  static const SPH_TweakRule  COMPATIBLE_WIDTHS_Rules
    -                              [COMPATIBLE_WIDTHS_RULES_SIZE] =
    -  {
    -    { "Arial Unicode MS", 12, "Regular Class", 'm' },
    -    { "Arial Unicode MS", 14, "Regular Class", 'm' },
    -    /* Cyrillic small letter sha */
    -    { "Arial", 10, "Regular Class", 0x448 },
    -    { "Arial", 11, "Regular Class", 'm' },
    -    { "Arial", 12, "Regular Class", 'm' },
    -    /* Cyrillic small letter sha */
    -    { "Arial", 12, "Regular Class", 0x448 },
    -    { "Arial", 13, "Regular Class", 0x448 },
    -    { "Arial", 14, "Regular Class", 'm' },
    -    /* Cyrillic small letter sha */
    -    { "Arial", 14, "Regular Class", 0x448 },
    -    { "Arial", 15, "Regular Class", 0x448 },
    -    { "Arial", 17, "Regular Class", 'm' },
    -    { "DejaVu Sans", 15, "Regular Class", 0 },
    -    { "Microsoft Sans Serif", 11, "Regular Class", 0 },
    -    { "Microsoft Sans Serif", 12, "Regular Class", 0 },
    -    { "Segoe UI", 11, "Regular Class", 0 },
    -    { "Monaco", 0, "Regular Class", 0 },
    -    { "Segoe UI", 12, "Regular Class", 'm' },
    -    { "Segoe UI", 14, "Regular Class", 'm' },
    -    { "Tahoma", 11, "Regular Class", 0 },
    -    { "Times New Roman", 16, "Regular Class", 'c' },
    -    { "Times New Roman", 16, "Regular Class", 'm' },
    -    { "Times New Roman", 16, "Regular Class", 'o' },
    -    { "Times New Roman", 16, "Regular Class", 'w' },
    -    { "Trebuchet MS", 11, "Regular Class", 0 },
    -    { "Trebuchet MS", 12, "Regular Class", 0 },
    -    { "Trebuchet MS", 14, "Regular Class", 0 },
    -    { "Trebuchet MS", 15, "Regular Class", 0 },
    -    { "Ubuntu", 12, "Regular Class", 'm' },
    -    /* Cyrillic small letter sha */
    -    { "Verdana", 10, "Regular Class", 0x448 },
    -    { "Verdana", 11, "Regular Class", 0x448 },
    -    { "Verdana and Clones", 12, "Regular Class", 'i' },
    -    { "Verdana and Clones", 12, "Regular Class", 'j' },
    -    { "Verdana and Clones", 12, "Regular Class", 'l' },
    -    { "Verdana and Clones", 12, "Regular Class", 'm' },
    -    { "Verdana and Clones", 13, "Regular Class", 'i' },
    -    { "Verdana and Clones", 13, "Regular Class", 'j' },
    -    { "Verdana and Clones", 13, "Regular Class", 'l' },
    -    { "Verdana and Clones", 14, "Regular Class", 'm' },
    -  };
    -
    -
    -  /* Scaling slightly in the x-direction prior to hinting results in       */
    -  /* more visually pleasing glyphs in certain cases.                       */
    -  /* This sometimes needs to be coordinated with compatible width rules.   */
    -  /* A value of 1000 corresponds to a scaled value of 1.0.                 */
    -
    -#define X_SCALING_RULES_SIZE  50
    -
    -  static const SPH_ScaleRule  X_SCALING_Rules[X_SCALING_RULES_SIZE] =
    -  {
    -    { "DejaVu Sans", 12, "Regular Class", 'm', 950 },
    -    { "Verdana and Clones", 12, "Regular Class", 'a', 1100 },
    -    { "Verdana and Clones", 13, "Regular Class", 'a', 1050 },
    -    { "Arial", 11, "Regular Class", 'm', 975 },
    -    { "Arial", 12, "Regular Class", 'm', 1050 },
    -    /* Cyrillic small letter el */
    -    { "Arial", 13, "Regular Class", 0x43B, 950 },
    -    { "Arial", 13, "Regular Class", 'o', 950 },
    -    { "Arial", 13, "Regular Class", 'e', 950 },
    -    { "Arial", 14, "Regular Class", 'm', 950 },
    -    /* Cyrillic small letter el */
    -    { "Arial", 15, "Regular Class", 0x43B, 925 },
    -    { "Bitstream Vera Sans", 10, "Regular/Italic Class", 0, 1100 },
    -    { "Bitstream Vera Sans", 12, "Regular/Italic Class", 0, 1050 },
    -    { "Bitstream Vera Sans", 16, "Regular Class", 0, 1050 },
    -    { "Bitstream Vera Sans", 9, "Regular/Italic Class", 0, 1050 },
    -    { "DejaVu Sans", 12, "Regular Class", 'l', 975 },
    -    { "DejaVu Sans", 12, "Regular Class", 'i', 975 },
    -    { "DejaVu Sans", 12, "Regular Class", 'j', 975 },
    -    { "DejaVu Sans", 13, "Regular Class", 'l', 950 },
    -    { "DejaVu Sans", 13, "Regular Class", 'i', 950 },
    -    { "DejaVu Sans", 13, "Regular Class", 'j', 950 },
    -    { "DejaVu Sans", 10, "Regular/Italic Class", 0, 1100 },
    -    { "DejaVu Sans", 12, "Regular/Italic Class", 0, 1050 },
    -    { "Georgia", 10, "", 0, 1050 },
    -    { "Georgia", 11, "", 0, 1100 },
    -    { "Georgia", 12, "", 0, 1025 },
    -    { "Georgia", 13, "", 0, 1050 },
    -    { "Georgia", 16, "", 0, 1050 },
    -    { "Georgia", 17, "", 0, 1030 },
    -    { "Liberation Sans", 12, "Regular Class", 'm', 1100 },
    -    { "Lucida Grande", 11, "Regular Class", 'm', 1100 },
    -    { "Microsoft Sans Serif", 11, "Regular Class", 'm', 950 },
    -    { "Microsoft Sans Serif", 12, "Regular Class", 'm', 1050 },
    -    { "Segoe UI", 12, "Regular Class", 'H', 1050 },
    -    { "Segoe UI", 12, "Regular Class", 'm', 1050 },
    -    { "Segoe UI", 14, "Regular Class", 'm', 1050 },
    -    { "Tahoma", 11, "Regular Class", 'i', 975 },
    -    { "Tahoma", 11, "Regular Class", 'l', 975 },
    -    { "Tahoma", 11, "Regular Class", 'j', 900 },
    -    { "Tahoma", 11, "Regular Class", 'm', 918 },
    -    { "Verdana", 10, "Regular/Italic Class", 0, 1100 },
    -    { "Verdana", 12, "Regular Class", 'm', 975 },
    -    { "Verdana", 12, "Regular/Italic Class", 0, 1050 },
    -    { "Verdana", 13, "Regular/Italic Class", 'i', 950 },
    -    { "Verdana", 13, "Regular/Italic Class", 'j', 950 },
    -    { "Verdana", 13, "Regular/Italic Class", 'l', 950 },
    -    { "Verdana", 16, "Regular Class", 0, 1050 },
    -    { "Verdana", 9, "Regular/Italic Class", 0, 1050 },
    -    { "Times New Roman", 16, "Regular Class", 'm', 918 },
    -    { "Trebuchet MS", 11, "Regular Class", 'm', 800 },
    -    { "Trebuchet MS", 12, "Regular Class", 'm', 800 },
    -  };
    -
    -#else
    -
    -#define COMPATIBLE_WIDTHS_RULES_SIZE  1
    -
    -  static const SPH_TweakRule  COMPATIBLE_WIDTHS_Rules
    -                              [COMPATIBLE_WIDTHS_RULES_SIZE] =
    -  {
    -    { "-", 0, "", 0 },
    -  };
    -
    -
    -#define X_SCALING_RULES_SIZE  1
    -
    -  static const SPH_ScaleRule  X_SCALING_Rules
    -                              [X_SCALING_RULES_SIZE] =
    -  {
    -    { "-", 0, "", 0, 1000 },
    -  };
    -
    -#endif /* FORCE_NATURAL_WIDTHS */
    -
    -
    -  static FT_Bool
    -  is_member_of_family_class( const FT_String*  detected_font_name,
    -                             const FT_String*  rule_font_name )
    -  {
    -    FT_UInt  i, j;
    -
    -
    -    /* Does font name match rule family? */
    -    if ( ft_strcmp( detected_font_name, rule_font_name ) == 0 )
    -      return TRUE;
    -
    -    /* Is font name a wildcard ""? */
    -    if ( ft_strcmp( rule_font_name, "" ) == 0 )
    -      return TRUE;
    -
    -    /* Is font name contained in a class list? */
    -    for ( i = 0; i < FAMILY_CLASS_RULES_SIZE; i++ )
    -    {
    -      if ( ft_strcmp( FAMILY_CLASS_Rules[i].name, rule_font_name ) == 0 )
    -      {
    -        for ( j = 0; j < SPH_MAX_CLASS_MEMBERS; j++ )
    -        {
    -          if ( ft_strcmp( FAMILY_CLASS_Rules[i].member[j], "" ) == 0 )
    -            continue;
    -          if ( ft_strcmp( FAMILY_CLASS_Rules[i].member[j],
    -                          detected_font_name ) == 0 )
    -            return TRUE;
    -        }
    -      }
    -    }
    -
    -    return FALSE;
    -  }
    -
    -
    -  static FT_Bool
    -  is_member_of_style_class( const FT_String*  detected_font_style,
    -                            const FT_String*  rule_font_style )
    -  {
    -    FT_UInt  i, j;
    -
    -
    -    /* Does font style match rule style? */
    -    if ( ft_strcmp( detected_font_style, rule_font_style ) == 0 )
    -      return TRUE;
    -
    -    /* Is font style a wildcard ""? */
    -    if ( ft_strcmp( rule_font_style, "" ) == 0 )
    -      return TRUE;
    -
    -    /* Is font style contained in a class list? */
    -    for ( i = 0; i < STYLE_CLASS_RULES_SIZE; i++ )
    -    {
    -      if ( ft_strcmp( STYLE_CLASS_Rules[i].name, rule_font_style ) == 0 )
    -      {
    -        for ( j = 0; j < SPH_MAX_CLASS_MEMBERS; j++ )
    -        {
    -          if ( ft_strcmp( STYLE_CLASS_Rules[i].member[j], "" ) == 0 )
    -            continue;
    -          if ( ft_strcmp( STYLE_CLASS_Rules[i].member[j],
    -                          detected_font_style ) == 0 )
    -            return TRUE;
    -        }
    -      }
    -    }
    -
    -    return FALSE;
    -  }
    -
    -
    -  FT_LOCAL_DEF( FT_Bool )
    -  sph_test_tweak( TT_Face               face,
    -                  const FT_String*      family,
    -                  FT_UInt               ppem,
    -                  const FT_String*      style,
    -                  FT_UInt               glyph_index,
    -                  const SPH_TweakRule*  rule,
    -                  FT_UInt               num_rules )
    -  {
    -    FT_UInt  i;
    -
    -
    -    /* rule checks may be able to be optimized further */
    -    for ( i = 0; i < num_rules; i++ )
    -    {
    -      if ( family                                                   &&
    -           ( is_member_of_family_class ( family, rule[i].family ) ) )
    -        if ( rule[i].ppem == 0    ||
    -             rule[i].ppem == ppem )
    -          if ( style                                             &&
    -               is_member_of_style_class ( style, rule[i].style ) )
    -            if ( rule[i].glyph == 0                                ||
    -                 FT_Get_Char_Index( (FT_Face)face,
    -                                    rule[i].glyph ) == glyph_index )
    -        return TRUE;
    -    }
    -
    -    return FALSE;
    -  }
    -
    -
    -  static FT_UInt
    -  scale_test_tweak( TT_Face               face,
    -                    const FT_String*      family,
    -                    FT_UInt               ppem,
    -                    const FT_String*      style,
    -                    FT_UInt               glyph_index,
    -                    const SPH_ScaleRule*  rule,
    -                    FT_UInt               num_rules )
    -  {
    -    FT_UInt  i;
    -
    -
    -    /* rule checks may be able to be optimized further */
    -    for ( i = 0; i < num_rules; i++ )
    -    {
    -      if ( family                                                   &&
    -           ( is_member_of_family_class ( family, rule[i].family ) ) )
    -        if ( rule[i].ppem == 0    ||
    -             rule[i].ppem == ppem )
    -          if ( style                                            &&
    -               is_member_of_style_class( style, rule[i].style ) )
    -            if ( rule[i].glyph == 0                                ||
    -                 FT_Get_Char_Index( (FT_Face)face,
    -                                    rule[i].glyph ) == glyph_index )
    -        return rule[i].scale;
    -    }
    -
    -    return 1000;
    -  }
    -
    -
    -  FT_LOCAL_DEF( FT_UInt )
    -  sph_test_tweak_x_scaling( TT_Face           face,
    -                            const FT_String*  family,
    -                            FT_UInt           ppem,
    -                            const FT_String*  style,
    -                            FT_UInt           glyph_index )
    -  {
    -    return scale_test_tweak( face, family, ppem, style, glyph_index,
    -                             X_SCALING_Rules, X_SCALING_RULES_SIZE );
    -  }
    -
    -
    -#define TWEAK_RULES( x )                                       \
    -  if ( sph_test_tweak( face, family, ppem, style, glyph_index, \
    -                       x##_Rules, x##_RULES_SIZE ) )           \
    -    loader->exec->sph_tweak_flags |= SPH_TWEAK_##x
    -
    -#define TWEAK_RULES_EXCEPTIONS( x )                                        \
    -  if ( sph_test_tweak( face, family, ppem, style, glyph_index,             \
    -                       x##_Rules_Exceptions, x##_RULES_EXCEPTIONS_SIZE ) ) \
    -    loader->exec->sph_tweak_flags &= ~SPH_TWEAK_##x
    -
    -
    -  FT_LOCAL_DEF( void )
    -  sph_set_tweaks( TT_Loader  loader,
    -                  FT_UInt    glyph_index )
    -  {
    -    TT_Face     face   = loader->face;
    -    FT_String*  family = face->root.family_name;
    -    FT_UInt     ppem   = loader->size->metrics->x_ppem;
    -    FT_String*  style  = face->root.style_name;
    -
    -
    -    /* don't apply rules if style isn't set */
    -    if ( !face->root.style_name )
    -      return;
    -
    -#ifdef SPH_DEBUG_MORE_VERBOSE
    -    printf( "%s,%d,%s,%c=%d ",
    -            family, ppem, style, glyph_index, glyph_index );
    -#endif
    -
    -    TWEAK_RULES( PIXEL_HINTING );
    -
    -    if ( loader->exec->sph_tweak_flags & SPH_TWEAK_PIXEL_HINTING )
    -    {
    -      loader->exec->ignore_x_mode = FALSE;
    -      return;
    -    }
    -
    -    TWEAK_RULES( ALLOW_X_DMOVE );
    -    TWEAK_RULES( ALWAYS_DO_DELTAP );
    -    TWEAK_RULES( ALWAYS_SKIP_DELTAP );
    -    TWEAK_RULES( DEEMBOLDEN );
    -    TWEAK_RULES( DO_SHPIX );
    -    TWEAK_RULES( EMBOLDEN );
    -    TWEAK_RULES( MIAP_HACK );
    -    TWEAK_RULES( NORMAL_ROUND );
    -    TWEAK_RULES( NO_ALIGNRP_AFTER_IUP );
    -    TWEAK_RULES( NO_CALL_AFTER_IUP );
    -    TWEAK_RULES( NO_DELTAP_AFTER_IUP );
    -    TWEAK_RULES( RASTERIZER_35 );
    -    TWEAK_RULES( SKIP_IUP );
    -
    -    TWEAK_RULES( SKIP_OFFPIXEL_Y_MOVES );
    -    TWEAK_RULES_EXCEPTIONS( SKIP_OFFPIXEL_Y_MOVES );
    -
    -    TWEAK_RULES( SKIP_NONPIXEL_Y_MOVES_DELTAP );
    -
    -    TWEAK_RULES( SKIP_NONPIXEL_Y_MOVES );
    -    TWEAK_RULES_EXCEPTIONS( SKIP_NONPIXEL_Y_MOVES );
    -
    -    TWEAK_RULES( ROUND_NONPIXEL_Y_MOVES );
    -    TWEAK_RULES_EXCEPTIONS( ROUND_NONPIXEL_Y_MOVES );
    -
    -    if ( loader->exec->sph_tweak_flags & SPH_TWEAK_RASTERIZER_35 )
    -    {
    -      if ( loader->exec->rasterizer_version != TT_INTERPRETER_VERSION_35 )
    -      {
    -        loader->exec->rasterizer_version = TT_INTERPRETER_VERSION_35;
    -        loader->exec->size->cvt_ready    = -1;
    -
    -        tt_size_ready_bytecode(
    -          loader->exec->size,
    -          FT_BOOL( loader->load_flags & FT_LOAD_PEDANTIC ) );
    -      }
    -      else
    -        loader->exec->rasterizer_version = TT_INTERPRETER_VERSION_35;
    -    }
    -    else
    -    {
    -      if ( loader->exec->rasterizer_version  !=
    -           SPH_OPTION_SET_RASTERIZER_VERSION )
    -      {
    -        loader->exec->rasterizer_version = SPH_OPTION_SET_RASTERIZER_VERSION;
    -        loader->exec->size->cvt_ready    = -1;
    -
    -        tt_size_ready_bytecode(
    -          loader->exec->size,
    -          FT_BOOL( loader->load_flags & FT_LOAD_PEDANTIC ) );
    -      }
    -      else
    -        loader->exec->rasterizer_version = SPH_OPTION_SET_RASTERIZER_VERSION;
    -    }
    -
    -    if ( IS_HINTED( loader->load_flags ) )
    -    {
    -      TWEAK_RULES( TIMES_NEW_ROMAN_HACK );
    -      TWEAK_RULES( COURIER_NEW_2_HACK );
    -    }
    -
    -    if ( sph_test_tweak( face, family, ppem, style, glyph_index,
    -           COMPATIBILITY_MODE_Rules, COMPATIBILITY_MODE_RULES_SIZE ) )
    -      loader->exec->face->sph_compatibility_mode = TRUE;
    -
    -
    -    if ( IS_HINTED( loader->load_flags ) )
    -    {
    -      if ( sph_test_tweak( face, family, ppem, style, glyph_index,
    -             COMPATIBLE_WIDTHS_Rules, COMPATIBLE_WIDTHS_RULES_SIZE ) )
    -        loader->exec->compatible_widths |= TRUE;
    -    }
    -  }
    -
    -#else /* !(TT_USE_BYTECODE_INTERPRETER &&          */
    -      /*   TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY) */
    -
    -  /* ANSI C doesn't like empty source files */
    -  typedef int  _tt_subpix_dummy;
    -
    -#endif /* !(TT_USE_BYTECODE_INTERPRETER &&          */
    -       /*   TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY) */
    -
    -
    -/* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/truetype/ttsubpix.h b/src/java.desktop/share/native/libfreetype/src/truetype/ttsubpix.h
    deleted file mode 100644
    index 62af4c272d1..00000000000
    --- a/src/java.desktop/share/native/libfreetype/src/truetype/ttsubpix.h
    +++ /dev/null
    @@ -1,110 +0,0 @@
    -/****************************************************************************
    - *
    - * ttsubpix.h
    - *
    - *   TrueType Subpixel Hinting.
    - *
    - * Copyright (C) 2010-2023 by
    - * David Turner, Robert Wilhelm, and Werner Lemberg.
    - *
    - * This file is part of the FreeType project, and may only be used,
    - * modified, and distributed under the terms of the FreeType project
    - * license, LICENSE.TXT.  By continuing to use, modify, or distribute
    - * this file you indicate that you have read the license and
    - * understand and accept it fully.
    - *
    - */
    -
    -
    -#ifndef TTSUBPIX_H_
    -#define TTSUBPIX_H_
    -
    -#include "ttobjs.h"
    -#include "ttinterp.h"
    -
    -
    -FT_BEGIN_HEADER
    -
    -
    -#ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY
    -
    -  /**************************************************************************
    -   *
    -   * ID flags to identify special functions at FDEF and runtime.
    -   *
    -   */
    -#define SPH_FDEF_INLINE_DELTA_1       0x0000001
    -#define SPH_FDEF_INLINE_DELTA_2       0x0000002
    -#define SPH_FDEF_DIAGONAL_STROKE      0x0000004
    -#define SPH_FDEF_VACUFORM_ROUND_1     0x0000008
    -#define SPH_FDEF_TTFAUTOHINT_1        0x0000010
    -#define SPH_FDEF_SPACING_1            0x0000020
    -#define SPH_FDEF_SPACING_2            0x0000040
    -#define SPH_FDEF_TYPEMAN_STROKES      0x0000080
    -#define SPH_FDEF_TYPEMAN_DIAGENDCTRL  0x0000100
    -
    -
    -  /**************************************************************************
    -   *
    -   * Tweak flags that are set for each glyph by the below rules.
    -   *
    -   */
    -#define SPH_TWEAK_ALLOW_X_DMOVE                   0x0000001UL
    -#define SPH_TWEAK_ALWAYS_DO_DELTAP                0x0000002UL
    -#define SPH_TWEAK_ALWAYS_SKIP_DELTAP              0x0000004UL
    -#define SPH_TWEAK_COURIER_NEW_2_HACK              0x0000008UL
    -#define SPH_TWEAK_DEEMBOLDEN                      0x0000010UL
    -#define SPH_TWEAK_DO_SHPIX                        0x0000020UL
    -#define SPH_TWEAK_EMBOLDEN                        0x0000040UL
    -#define SPH_TWEAK_MIAP_HACK                       0x0000080UL
    -#define SPH_TWEAK_NORMAL_ROUND                    0x0000100UL
    -#define SPH_TWEAK_NO_ALIGNRP_AFTER_IUP            0x0000200UL
    -#define SPH_TWEAK_NO_CALL_AFTER_IUP               0x0000400UL
    -#define SPH_TWEAK_NO_DELTAP_AFTER_IUP             0x0000800UL
    -#define SPH_TWEAK_PIXEL_HINTING                   0x0001000UL
    -#define SPH_TWEAK_RASTERIZER_35                   0x0002000UL
    -#define SPH_TWEAK_ROUND_NONPIXEL_Y_MOVES          0x0004000UL
    -#define SPH_TWEAK_SKIP_IUP                        0x0008000UL
    -#define SPH_TWEAK_SKIP_NONPIXEL_Y_MOVES           0x0010000UL
    -#define SPH_TWEAK_SKIP_OFFPIXEL_Y_MOVES           0x0020000UL
    -#define SPH_TWEAK_TIMES_NEW_ROMAN_HACK            0x0040000UL
    -#define SPH_TWEAK_SKIP_NONPIXEL_Y_MOVES_DELTAP    0x0080000UL
    -
    -
    -  FT_LOCAL( FT_Bool )
    -  sph_test_tweak( TT_Face               face,
    -                  const FT_String*      family,
    -                  FT_UInt               ppem,
    -                  const FT_String*      style,
    -                  FT_UInt               glyph_index,
    -                  const SPH_TweakRule*  rule,
    -                  FT_UInt               num_rules );
    -
    -  FT_LOCAL( FT_UInt )
    -  sph_test_tweak_x_scaling( TT_Face           face,
    -                            const FT_String*  family,
    -                            FT_UInt           ppem,
    -                            const FT_String*  style,
    -                            FT_UInt           glyph_index );
    -
    -  FT_LOCAL( void )
    -  sph_set_tweaks( TT_Loader  loader,
    -                  FT_UInt    glyph_index );
    -
    -
    -  /* These macros are defined absent a method for setting them */
    -#define SPH_OPTION_BITMAP_WIDTHS           FALSE
    -#define SPH_OPTION_SET_SUBPIXEL            TRUE
    -#define SPH_OPTION_SET_GRAYSCALE           FALSE
    -#define SPH_OPTION_SET_COMPATIBLE_WIDTHS   FALSE
    -#define SPH_OPTION_SET_RASTERIZER_VERSION  38
    -
    -#endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */
    -
    -
    -FT_END_HEADER
    -
    -#endif /* TTSUBPIX_H_ */
    -
    -
    -/* END */
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1afm.c b/src/java.desktop/share/native/libfreetype/src/type1/t1afm.c
    index a63cd4dc48a..b1a0d23bed6 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1afm.c
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1afm.c
    @@ -4,7 +4,7 @@
      *
      *   AFM support for Type 1 fonts (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1afm.h b/src/java.desktop/share/native/libfreetype/src/type1/t1afm.h
    index 7f5cdda191f..92ff627dd0d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1afm.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1afm.h
    @@ -4,7 +4,7 @@
      *
      *   AFM support for Type 1 fonts (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1driver.c b/src/java.desktop/share/native/libfreetype/src/type1/t1driver.c
    index 8ed01914a5a..5ded7714021 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1driver.c
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1driver.c
    @@ -4,7 +4,7 @@
      *
      *   Type 1 driver interface (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1driver.h b/src/java.desktop/share/native/libfreetype/src/type1/t1driver.h
    index 5ff52b55b1a..1cc3d24e7dd 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1driver.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1driver.h
    @@ -4,7 +4,7 @@
      *
      *   High-level Type 1 driver interface (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1errors.h b/src/java.desktop/share/native/libfreetype/src/type1/t1errors.h
    index 8aeb24ae188..46bddbc30fd 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1errors.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1errors.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 error codes (specification only).
      *
    - * Copyright (C) 2001-2024 by
    + * Copyright (C) 2001-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1gload.c b/src/java.desktop/share/native/libfreetype/src/type1/t1gload.c
    index c29e682510c..b9bc0b56ce8 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1gload.c
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1gload.c
    @@ -4,7 +4,7 @@
      *
      *   Type 1 Glyph Loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -70,8 +70,13 @@
         /* For incremental fonts get the character data using the */
         /* callback function.                                     */
         if ( inc )
    +    {
    +      /* So `free_glyph_data` knows whether to free it. */
    +      char_string->pointer = NULL;
    +
           error = inc->funcs->get_glyph_data( inc->object,
                                               glyph_index, char_string );
    +    }
         else
     
     #endif /* FT_CONFIG_OPTION_INCREMENTAL */
    @@ -155,6 +160,9 @@
           decoder->builder.advance.y      = INT_TO_FIXED( metrics.advance_v );
         }
     
    +    if ( error && inc )
    +      inc->funcs->free_glyph_data( inc->object, char_string );
    +
     #endif /* FT_CONFIG_OPTION_INCREMENTAL */
     
         return error;
    @@ -295,7 +303,7 @@
           {
             advances[nn] = 0;
     
    -        FT_TRACE5(( "  idx %d: advance height 0 font units\n",
    +        FT_TRACE5(( "  idx %u: advance height 0 font units\n",
                         first + nn ));
           }
     
    @@ -333,7 +341,7 @@
           else
             advances[nn] = 0;
     
    -      FT_TRACE5(( "  idx %d: advance width %ld font unit%s\n",
    +      FT_TRACE5(( "  idx %u: advance width %ld font unit%s\n",
                       first + nn,
                       advances[nn],
                       advances[nn] == 1 ? "" : "s" ));
    @@ -380,7 +388,7 @@
           goto Exit;
         }
     
    -    FT_TRACE1(( "T1_Load_Glyph: glyph index %d\n", glyph_index ));
    +    FT_TRACE1(( "T1_Load_Glyph: glyph index %u\n", glyph_index ));
     
         FT_ASSERT( ( face->len_buildchar == 0 ) == ( face->buildchar == NULL ) );
     
    @@ -398,16 +406,12 @@
           glyph->y_scale = 0x10000L;
         }
     
    -    t1glyph->outline.n_points   = 0;
    -    t1glyph->outline.n_contours = 0;
    -
         hinting = FT_BOOL( !( load_flags & FT_LOAD_NO_SCALE   ) &&
                            !( load_flags & FT_LOAD_NO_HINTING ) );
         scaled  = FT_BOOL( !( load_flags & FT_LOAD_NO_SCALE   ) );
     
         glyph->hint     = hinting;
         glyph->scaled   = scaled;
    -    t1glyph->format = FT_GLYPH_FORMAT_OUTLINE;
     
         error = decoder_funcs->init( &decoder,
                                      t1glyph->face,
    @@ -452,16 +456,12 @@
     
         must_finish_decoder = FALSE;
     
    -    /* now, set the metrics -- this is rather simple, as   */
    -    /* the left side bearing is the xMin, and the top side */
    -    /* bearing the yMax                                    */
         if ( !error )
         {
    -      t1glyph->outline.flags &= FT_OUTLINE_OWNER;
    -      t1glyph->outline.flags |= FT_OUTLINE_REVERSE_FILL;
    -
    -      /* for composite glyphs, return only left side bearing and */
    -      /* advance width                                           */
    +      /* now, set the metrics -- this is rather simple, as   */
    +      /* the left side bearing is the xMin, and the top side */
    +      /* bearing the yMax; for composite glyphs, return only */
    +      /* left side bearing and advance width                 */
           if ( load_flags & FT_LOAD_NO_RECURSE )
           {
             FT_Slot_Internal  internal = t1glyph->internal;
    @@ -482,6 +482,13 @@
             FT_Glyph_Metrics*  metrics = &t1glyph->metrics;
     
     
    +        t1glyph->format = FT_GLYPH_FORMAT_OUTLINE;
    +
    +        t1glyph->outline.flags &= FT_OUTLINE_OWNER;
    +        t1glyph->outline.flags |= FT_OUTLINE_REVERSE_FILL;
    +        if ( t1size && t1size->metrics.y_ppem < 24 )
    +          t1glyph->outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    +
             /* copy the _unscaled_ advance width */
             metrics->horiAdvance =
               FIXED_TO_INT( decoder.builder.advance.x );
    @@ -504,11 +511,6 @@
                 FIXED_TO_INT( decoder.builder.advance.y );
             }
     
    -        t1glyph->format = FT_GLYPH_FORMAT_OUTLINE;
    -
    -        if ( t1size && t1size->metrics.y_ppem < 24 )
    -          t1glyph->outline.flags |= FT_OUTLINE_HIGH_PRECISION;
    -
     #if 1
             /* apply the font matrix, if any */
             if ( font_matrix.xx != 0x10000L || font_matrix.yy != 0x10000L ||
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1gload.h b/src/java.desktop/share/native/libfreetype/src/type1/t1gload.h
    index 17a6a5941e3..6bedd132c5f 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1gload.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1gload.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 Glyph Loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1load.c b/src/java.desktop/share/native/libfreetype/src/type1/t1load.c
    index ee7fb42a517..0f11445bef0 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1load.c
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1load.c
    @@ -4,7 +4,7 @@
      *
      *   Type 1 font loader (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -471,7 +471,7 @@
         nc = num_coords;
         if ( num_coords > blend->num_axis )
         {
    -      FT_TRACE2(( "T1_Get_MM_Blend: only using first %d of %d coordinates\n",
    +      FT_TRACE2(( "T1_Get_MM_Blend: only using first %u of %u coordinates\n",
                       blend->num_axis, num_coords ));
           nc = blend->num_axis;
         }
    @@ -640,7 +640,7 @@
       {
         FT_UNUSED( instance_index );
     
    -    return T1_Set_MM_Blend( face, 0, NULL );
    +    return T1_Set_MM_WeightVector( face, 0, NULL );
       }
     
     
    @@ -691,7 +691,7 @@
         if ( num_coords > blend->num_axis )
         {
           FT_TRACE2(( "T1_Get_Var_Design:"
    -                  " only using first %d of %d coordinates\n",
    +                  " only using first %u of %u coordinates\n",
                       blend->num_axis, num_coords ));
           nc = blend->num_axis;
         }
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1load.h b/src/java.desktop/share/native/libfreetype/src/type1/t1load.h
    index a45efa7cb7b..2cd8241968d 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1load.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1load.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 font loader (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1objs.c b/src/java.desktop/share/native/libfreetype/src/type1/t1objs.c
    index b1b27c31fe3..7f25208f875 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1objs.c
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1objs.c
    @@ -4,7 +4,7 @@
      *
      *   Type 1 objects manager (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1objs.h b/src/java.desktop/share/native/libfreetype/src/type1/t1objs.h
    index 3809370c1e0..6c71977c154 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1objs.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1objs.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 objects manager (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1parse.c b/src/java.desktop/share/native/libfreetype/src/type1/t1parse.c
    index 3717ea7c572..ef643e298f4 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1parse.c
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1parse.c
    @@ -4,7 +4,7 @@
      *
      *   Type 1 parser (body).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1parse.h b/src/java.desktop/share/native/libfreetype/src/type1/t1parse.h
    index a0a2134d45c..f4ad426e9e1 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1parse.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1parse.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 parser (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    diff --git a/src/java.desktop/share/native/libfreetype/src/type1/t1tokens.h b/src/java.desktop/share/native/libfreetype/src/type1/t1tokens.h
    index 5a3d2f1ef08..a526406a411 100644
    --- a/src/java.desktop/share/native/libfreetype/src/type1/t1tokens.h
    +++ b/src/java.desktop/share/native/libfreetype/src/type1/t1tokens.h
    @@ -4,7 +4,7 @@
      *
      *   Type 1 tokenizer (specification).
      *
    - * Copyright (C) 1996-2024 by
    + * Copyright (C) 1996-2025 by
      * David Turner, Robert Wilhelm, and Werner Lemberg.
      *
      * This file is part of the FreeType project, and may only be used,
    @@ -33,7 +33,7 @@
                        T1_FIELD_DICT_FONTDICT )
     
       /* we use pointers to detect modifications made by synthetic fonts */
    -  T1_FIELD_NUM   ( "ItalicAngle",        italic_angle,
    +  T1_FIELD_FIXED ( "ItalicAngle",        italic_angle,
                        T1_FIELD_DICT_FONTDICT )
       T1_FIELD_BOOL  ( "isFixedPitch",       is_fixed_pitch,
                        T1_FIELD_DICT_FONTDICT )
    diff --git a/src/java.desktop/share/native/libharfbuzz/hb-aat-layout-common.hh b/src/java.desktop/share/native/libharfbuzz/hb-aat-layout-common.hh
    index d2ce32616be..4bb41fd0189 100644
    --- a/src/java.desktop/share/native/libharfbuzz/hb-aat-layout-common.hh
    +++ b/src/java.desktop/share/native/libharfbuzz/hb-aat-layout-common.hh
    @@ -1266,8 +1266,7 @@ struct StateTableDriver
                                       next_state == StateTableT::STATE_START_OF_TEXT &&
                                       start_state_safe_to_break_eot &&
                                       is_not_actionable &&
    -                                  is_not_epsilon_transition &&
    -                                  !last_range;
    +                                  is_not_epsilon_transition;
     
             if (is_null_transition)
             {
    diff --git a/src/java.desktop/share/native/libharfbuzz/hb-ot-layout-common.hh b/src/java.desktop/share/native/libharfbuzz/hb-ot-layout-common.hh
    index dcacc9cb86c..6b62732bf54 100644
    --- a/src/java.desktop/share/native/libharfbuzz/hb-ot-layout-common.hh
    +++ b/src/java.desktop/share/native/libharfbuzz/hb-ot-layout-common.hh
    @@ -2480,7 +2480,7 @@ struct VarRegionAxis
         /* TODO Move these to sanitize(). */
         if (unlikely (start > peak || peak > end))
           return 1.f;
    -    if (unlikely (start < 0 && end > 0 && peak != 0))
    +    if (unlikely (start < 0 && end > 0))
           return 1.f;
     
         if (coord <= start || end <= coord)
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_16nw.c b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_16nw.c
    index 2e035d12453..26686e59b0b 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_16nw.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_16nw.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -286,13 +286,14 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
             pk = k + off;
             sp = sl0;
     
    -        k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -        p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    -
             dp = dl;
             kh = n - off;
     
             if (kh == 4) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +          p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    +
               sp += 3*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -325,6 +326,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 3) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +          p2 = sp[0]; p3 = sp[sll];
    +
               sp += 2*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -357,6 +362,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 2) {
    +
    +          k0 = pk[0]; k1 = pk[1];
    +          p2 = sp[0];
    +
               sp += sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -389,6 +398,9 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else /* if (kh == 1) */ {
    +
    +          k0 = pk[0];
    +
               for (j = 0; j < hsize; j++) {
                 p0 = sp[0];
     
    @@ -536,15 +548,14 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
               sp = sl;
               dp = dl;
     
    -          p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    -          p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
               if (kw == 7) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -583,6 +594,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -618,6 +635,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -653,6 +676,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -688,6 +715,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = buff[0]; p3 = buff[1];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -723,6 +754,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else /*if (kw == 2)*/ {
     
    +            p2 = buff[0];
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -756,6 +791,8 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +          pk += kw;
             }
           }
     
    @@ -882,17 +919,16 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
                 if (kw > MAX_KER) kw = kw/2;
               off += kw;
     
    -          p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    -          p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2]; p7 = sp[5*chan1];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
    -          sp += (kw - 1)*chan1;
    -
               if (kw == 7) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2]; p7 = sp[5*chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -927,6 +963,14 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -961,6 +1005,14 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -995,6 +1047,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -1029,6 +1087,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = sp[0]; p3 = sp[chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -1063,6 +1127,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 2) {
     
    +            p2 = sp[0];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -1097,6 +1167,10 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else /*if (kw == 1)*/ {
     
    +            k0 = pk[0];
    +
    +            sp += (kw - 1)*chan1;
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = sp[0];
    @@ -1127,6 +1201,8 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +        pk += kw;
             }
           }
     
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_32nw.c b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_32nw.c
    index bb264d9dcd2..b2ee9742213 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_32nw.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_32nw.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -158,13 +158,14 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
             pk = k + off;
             sp = sl0;
     
    -        k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -        p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    -
             dp = dl;
             kh = n - off;
     
             if (kh == 4) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +          p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    +
               sp += 3*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -195,6 +196,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 3) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +          p2 = sp[0]; p3 = sp[sll];
    +
               sp += 2*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -225,6 +230,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 2) {
    +
    +          k0 = pk[0]; k1 = pk[1];
    +          p2 = sp[0];
    +
               sp += sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -255,6 +264,9 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else /* if (kh == 1) */ {
    +
    +          k0 = pk[0];
    +
               for (j = 0; j < hsize; j++) {
                 p0 = sp[0];
     
    @@ -400,15 +412,14 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
               sp = sl;
               dp = dl;
     
    -          p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    -          p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
               if (kw == 7) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -444,6 +455,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -479,6 +496,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -514,6 +537,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -549,6 +576,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = buff[0]; p3 = buff[1];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -584,6 +615,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else { /* kw == 2 */
     
    +            p2 = buff[0];
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -617,6 +652,8 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +          pk += kw;
             }
           }
     
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_8nw.c b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_8nw.c
    index c144404b0f4..ce7cac00f72 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_8nw.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_8nw.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -287,13 +287,14 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
             pk = k + off;
             sp = sl0;
     
    -        k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -        p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    -
             dp = dl;
             kh = n - off;
     
             if (kh == 4) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +          p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    +
               sp += 3*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -326,6 +327,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 3) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +          p2 = sp[0]; p3 = sp[sll];
    +
               sp += 2*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -358,6 +363,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 2) {
    +
    +          k0 = pk[0]; k1 = pk[1];
    +          p2 = sp[0];
    +
               sp += sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -390,6 +399,9 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else /* if (kh == 1) */ {
    +
    +          k0 = pk[0];
    +
               for (j = 0; j < hsize; j++) {
                 p0 = sp[0];
     
    @@ -537,15 +549,14 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
               sp = sl;
               dp = dl;
     
    -          p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    -          p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
               if (kw == 7) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -584,6 +595,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -619,6 +636,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -654,6 +677,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -689,6 +716,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = buff[0]; p3 = buff[1];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -724,6 +755,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else /*if (kw == 2)*/ {
     
    +            p2 = buff[0];
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -757,6 +792,8 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +          pk += kw;
             }
           }
     
    @@ -883,17 +920,16 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
                 if (kw > MAX_KER) kw = kw/2;
               off += kw;
     
    -          p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    -          p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2]; p7 = sp[5*chan1];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
    -          sp += (kw - 1)*chan1;
    -
               if (kw == 7) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2]; p7 = sp[5*chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -928,6 +964,14 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -962,6 +1006,14 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -996,6 +1048,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -1030,6 +1088,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = sp[0]; p3 = sp[chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -1064,6 +1128,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 2) {
     
    +            p2 = sp[0];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -1098,6 +1168,10 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else /*if (kw == 1)*/ {
     
    +            k0 = pk[0];
    +
    +            sp += (kw - 1)*chan1;
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = sp[0];
    @@ -1128,6 +1202,8 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +          pk += kw;
             }
           }
     
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_D64nw.c b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_D64nw.c
    index 54e11c64a49..389c1b52c1e 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_D64nw.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_D64nw.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -150,13 +150,14 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
             pk = k + off;
             sp = sl0;
     
    -        k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -        p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    -
             dp = dl;
             kh = n - off;
     
             if (kh == 4) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +          p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    +
               sp += 3*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -184,6 +185,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 3) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +          p2 = sp[0]; p3 = sp[sll];
    +
               sp += 2*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -211,6 +216,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 2) {
    +
    +          k0 = pk[0]; k1 = pk[1];
    +          p2 = sp[0];
    +
               sp += sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -238,6 +247,9 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else /* if (kh == 1) */ {
    +
    +          k0 = pk[0];
    +
               for (j = 0; j < hsize; j++) {
                 p0 = sp[0];
     
    @@ -322,17 +334,16 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
               if (kw > 2*MAX_KER) kw = MAX_KER; else
                 if (kw > MAX_KER) kw = kw/2;
     
    -          p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    -          sp0 += chan3;
    -          p5 = sp0[0]; p6 = sp0[chan1]; p7 = sp0[chan2];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -
               dp = dl;
     
               if (kw == 7) {
    +
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
    +            p5 = sp0[0]; p6 = sp0[chan1]; p7 = sp0[chan2];
                 sp = sp0 += chan3;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -362,7 +373,13 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else if (kw == 6) {
    +
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
    +            p5 = sp0[0]; p6 = sp0[chan1];
                 sp = sp0 += chan2;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -392,7 +409,13 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else if (kw == 5) {
    +
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
    +            p5 = sp0[0];
                 sp = sp0 += chan1;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -423,7 +446,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
                 sp = sp0;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -453,7 +479,11 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else if (kw == 3) {
    +
    +            p2 = sp0[0]; p3 = sp0[chan1];
    +            sp0 += chan3;
                 sp = sp0 -= chan1;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -483,7 +513,11 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else { /* kw == 2 */
    +
    +            p2 = sp0[0];
    +            sp0 += chan3;
                 sp = sp0 -= chan2;
    +            k0 = pk[0]; k1 = pk[1];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_F32nw.c b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_F32nw.c
    index e87607ddf12..e6c68de5054 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_F32nw.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_F32nw.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -150,13 +150,14 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
             pk = k + off;
             sp = sl0;
     
    -        k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -        p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    -
             dp = dl;
             kh = n - off;
     
             if (kh == 4) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +          p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    +
               sp += 3*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -184,6 +185,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 3) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +          p2 = sp[0]; p3 = sp[sll];
    +
               sp += 2*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -211,6 +216,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 2) {
    +
    +          k0 = pk[0]; k1 = pk[1];
    +          p2 = sp[0];
    +
               sp += sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -238,6 +247,9 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else /* if (kh == 1) */ {
    +
    +          k0 = pk[0];
    +
               for (j = 0; j < hsize; j++) {
                 p0 = sp[0];
     
    @@ -322,17 +334,15 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
               if (kw > 2*MAX_KER) kw = MAX_KER; else
                 if (kw > MAX_KER) kw = kw/2;
     
    -          p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    -          sp0 += chan3;
    -          p5 = sp0[0]; p6 = sp0[chan1]; p7 = sp0[chan2];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -
               dp = dl;
     
               if (kw == 7) {
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
    +            p5 = sp0[0]; p6 = sp0[chan1]; p7 = sp0[chan2];
                 sp = sp0 += chan3;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -362,7 +372,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else if (kw == 6) {
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
    +            p5 = sp0[0]; p6 = sp0[chan1];
                 sp = sp0 += chan2;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -392,7 +407,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else if (kw == 5) {
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
    +            p5 = sp0[0];
                 sp = sp0 += chan1;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -423,7 +443,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = sp0[0]; p3 = sp0[chan1]; p4 = sp0[chan2];
    +            sp0 += chan3;
                 sp = sp0;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -453,7 +476,11 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else if (kw == 3) {
    +
    +            p2 = sp0[0]; p3 = sp0[chan1];
    +            sp0 += chan3;
                 sp = sp0 -= chan1;
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    @@ -483,7 +510,11 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                 }
     
               } else { /* kw == 2 */
    +
    +            p2 = sp0[0];
    +            sp0 += chan3;
                 sp = sp0 -= chan2;
    +            k0 = pk[0]; k1 = pk[1];
     
                 if (pk == k) {
                   for (i = 0; i <= (wid - 2); i += 2) {
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_u16nw.c b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_u16nw.c
    index 49412c7d7ef..438531e2c07 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_u16nw.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_ImageConv_u16nw.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -286,13 +286,14 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
             pk = k + off;
             sp = sl0;
     
    -        k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -        p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    -
             dp = dl;
             kh = n - off;
     
             if (kh == 4) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +          p2 = sp[0]; p3 = sp[sll]; p4 = sp[2*sll];
    +
               sp += 3*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -325,6 +326,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 3) {
    +
    +          k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +          p2 = sp[0]; p3 = sp[sll];
    +
               sp += 2*sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -357,6 +362,10 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else if (kh == 2) {
    +
    +          k0 = pk[0]; k1 = pk[1];
    +          p2 = sp[0];
    +
               sp += sll;
     
               for (j = 0; j <= (hsize - 2); j += 2) {
    @@ -389,6 +398,9 @@ static mlib_status mlib_ImageConv1xN(mlib_image       *dst,
               }
     
             } else /* if (kh == 1) */ {
    +
    +          k0 = pk[0];
    +
               for (j = 0; j < hsize; j++) {
                 p0 = sp[0];
     
    @@ -536,15 +548,14 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
               sp = sl;
               dp = dl;
     
    -          p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    -          p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
               if (kw == 7) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4]; p7 = buff[5];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -583,6 +594,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3]; p6 = buff[4];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -618,6 +635,12 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +            p5 = buff[3];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -653,6 +676,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = buff[0]; p3 = buff[1]; p4 = buff[2];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -688,6 +715,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = buff[0]; p3 = buff[1];
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -723,6 +754,10 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
     
               } else /*if (kw == 2)*/ {
     
    +            p2 = buff[0];
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -756,6 +791,8 @@ mlib_status CONV_FUNC(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +          pk += kw;
             }
           }
     
    @@ -882,17 +919,16 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
                 if (kw > MAX_KER) kw = kw/2;
               off += kw;
     
    -          p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    -          p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2]; p7 = sp[5*chan1];
    -
    -          k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    -          k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    -          pk += kw;
    -
    -          sp += (kw - 1)*chan1;
    -
               if (kw == 7) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2]; p7 = sp[5*chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5]; k6 = pk[6];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6; p5 = p7;
    @@ -927,6 +963,14 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 6) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1]; p6 = sp[chan2 + chan2];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4]; k5 = pk[5];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5; p4 = p6;
    @@ -961,6 +1005,14 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 5) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +            p5 = sp[chan2 + chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +            k4 = pk[4];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4; p3 = p5;
    @@ -995,6 +1047,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 4) {
     
    +            p2 = sp[0]; p3 = sp[chan1]; p4 = sp[chan2];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2]; k3 = pk[3];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3; p2 = p4;
    @@ -1029,6 +1087,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 3) {
     
    +            p2 = sp[0]; p3 = sp[chan1];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1]; k2 = pk[2];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2; p1 = p3;
    @@ -1063,6 +1127,12 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else if (kw == 2) {
     
    +            p2 = sp[0];
    +
    +            sp += (kw - 1)*chan1;
    +
    +            k0 = pk[0]; k1 = pk[1];
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = p2;
    @@ -1097,6 +1167,10 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
     
               } else /*if (kw == 1)*/ {
     
    +            k0 = pk[0];
    +
    +            sp += (kw - 1)*chan1;
    +
                 if (l < (n - 1) || off < m) {
                   for (i = 0; i <= (wid - 2); i += 2) {
                     p0 = sp[0];
    @@ -1127,6 +1201,8 @@ mlib_status CONV_FUNC_I(MxN)(mlib_image       *dst,
                   }
                 }
               }
    +
    +          pk += kw;
             }
           }
     
    diff --git a/src/java.desktop/share/native/libmlib_image/mlib_c_ImageConvVersion.c b/src/java.desktop/share/native/libmlib_image/mlib_c_ImageConvVersion.c
    index 5f8768fb180..b7e685609c9 100644
    --- a/src/java.desktop/share/native/libmlib_image/mlib_c_ImageConvVersion.c
    +++ b/src/java.desktop/share/native/libmlib_image/mlib_c_ImageConvVersion.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -51,7 +51,7 @@ mlib_s32 mlib_ImageConvVersion(mlib_s32 m,
                                    mlib_s32 scale,
                                    mlib_type type)
     {
    -  mlib_d64 dscale = 1.0 / (1 << scale); /* 16 < scale <= 31 */
    +  mlib_d64 dscale = 1.0 / (((mlib_s64)1) << scale); /* 16 < scale <= 31 */
     
       if (type == MLIB_BYTE) {
         if ((m * n * dscale * 32768.0) > MAX_U8)
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/COPYING b/src/java.desktop/share/native/libsplashscreen/giflib/COPYING
    index b9c0b501260..92774a3b036 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/COPYING
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/COPYING
    @@ -1,4 +1,4 @@
    -The GIFLIB distribution is Copyright (c) 1997  Eric S. Raymond
    += MIT LICENSE
     
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to deal
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/dgif_lib.c b/src/java.desktop/share/native/libsplashscreen/giflib/dgif_lib.c
    index 0b2860b4b50..2fa005ed20d 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/dgif_lib.c
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/dgif_lib.c
    @@ -30,9 +30,9 @@ The functions here and in egif_lib.c are partitioned carefully so that
     if you only require one of read and write capability, only one of these
     two modules will be linked.  Preserve this property!
     
    -SPDX-License-Identifier: MIT
    -
     *****************************************************************************/
    +// SPDX-License-Identifier: MIT
    +// SPDX-FileCopyrightText: Copyright (C) Eric S. Raymond 
     
     #include 
     #include 
    @@ -55,11 +55,11 @@ SPDX-License-Identifier: MIT
     
     /* avoid extra function call in case we use fread (TVT) */
     static int InternalRead(GifFileType *gif, GifByteType *buf, int len) {
    -    // fprintf(stderr, "### Read: %d\n", len);
    -    return (((GifFilePrivateType *)gif->Private)->Read
    -                ? ((GifFilePrivateType *)gif->Private)->Read(gif, buf, len)
    -                : fread(buf, 1, len,
    -                        ((GifFilePrivateType *)gif->Private)->File));
    +        // fprintf(stderr, "### Read: %d\n", len);
    +        return (((GifFilePrivateType *)gif->Private)->Read
    +                    ? ((GifFilePrivateType *)gif->Private)->Read(gif, buf, len)
    +                    : fread(buf, 1, len,
    +                            ((GifFilePrivateType *)gif->Private)->File));
     }
     
     static int DGifGetWord(GifFileType *GifFile, GifWord *Word);
    @@ -78,18 +78,18 @@ static int DGifBufferedInput(GifFileType *GifFile, GifByteType *Buf,
      info record.
     ******************************************************************************/
     GifFileType *DGifOpenFileName(const char *FileName, int *Error) {
    -    int FileHandle;
    -    GifFileType *GifFile;
    +        int FileHandle;
    +        GifFileType *GifFile;
     
    -    if ((FileHandle = open(FileName, O_RDONLY)) == -1) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_OPEN_FAILED;
    +        if ((FileHandle = open(FileName, O_RDONLY)) == -1) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_OPEN_FAILED;
    +                }
    +                return NULL;
             }
    -        return NULL;
    -    }
     
    -    GifFile = DGifOpenFileHandle(FileHandle, Error);
    -    return GifFile;
    +        GifFile = DGifOpenFileHandle(FileHandle, Error);
    +        return GifFile;
     }
     
     /******************************************************************************
    @@ -98,171 +98,171 @@ GifFileType *DGifOpenFileName(const char *FileName, int *Error) {
      info record.
     ******************************************************************************/
     GifFileType *DGifOpenFileHandle(int FileHandle, int *Error) {
    -    char Buf[GIF_STAMP_LEN + 1];
    -    GifFileType *GifFile;
    -    GifFilePrivateType *Private;
    -    FILE *f;
    +        char Buf[GIF_STAMP_LEN + 1];
    +        GifFileType *GifFile;
    +        GifFilePrivateType *Private;
    +        FILE *f;
     
    -    GifFile = (GifFileType *)malloc(sizeof(GifFileType));
    -    if (GifFile == NULL) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +        GifFile = (GifFileType *)malloc(sizeof(GifFileType));
    +        if (GifFile == NULL) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                }
    +                (void)close(FileHandle);
    +                return NULL;
             }
    -        (void)close(FileHandle);
    -        return NULL;
    -    }
     
    -    /*@i1@*/ memset(GifFile, '\0', sizeof(GifFileType));
    +        /*@i1@*/ memset(GifFile, '\0', sizeof(GifFileType));
     
    -    /* Belt and suspenders, in case the null pointer isn't zero */
    -    GifFile->SavedImages = NULL;
    -    GifFile->SColorMap = NULL;
    +        /* Belt and suspenders, in case the null pointer isn't zero */
    +        GifFile->SavedImages = NULL;
    +        GifFile->SColorMap = NULL;
     
    -    Private = (GifFilePrivateType *)calloc(1, sizeof(GifFilePrivateType));
    -    if (Private == NULL) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +        Private = (GifFilePrivateType *)calloc(1, sizeof(GifFilePrivateType));
    +        if (Private == NULL) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                }
    +                (void)close(FileHandle);
    +                free((char *)GifFile);
    +                return NULL;
             }
    -        (void)close(FileHandle);
    -        free((char *)GifFile);
    -        return NULL;
    -    }
     
    -    /*@i1@*/ memset(Private, '\0', sizeof(GifFilePrivateType));
    +        /*@i1@*/ memset(Private, '\0', sizeof(GifFilePrivateType));
     
     #ifdef _WIN32
    -    _setmode(FileHandle, O_BINARY); /* Make sure it is in binary mode. */
    +        _setmode(FileHandle, O_BINARY); /* Make sure it is in binary mode. */
     #endif                                  /* _WIN32 */
     
    -    f = fdopen(FileHandle, "rb"); /* Make it into a stream: */
    +        f = fdopen(FileHandle, "rb"); /* Make it into a stream: */
     
    -    /*@-mustfreeonly@*/
    -    GifFile->Private = (void *)Private;
    -    Private->FileHandle = FileHandle;
    -    Private->File = f;
    -    Private->FileState = FILE_STATE_READ;
    -    Private->Read = NULL;     /* don't use alternate input method (TVT) */
    -    GifFile->UserData = NULL; /* TVT */
    -    /*@=mustfreeonly@*/
    +        /*@-mustfreeonly@*/
    +        GifFile->Private = (void *)Private;
    +        Private->FileHandle = FileHandle;
    +        Private->File = f;
    +        Private->FileState = FILE_STATE_READ;
    +        Private->Read = NULL;     /* don't use alternate input method (TVT) */
    +        GifFile->UserData = NULL; /* TVT */
    +        /*@=mustfreeonly@*/
     
    -    /* Let's see if this is a GIF file: */
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, (unsigned char *)Buf, GIF_STAMP_LEN) !=
    -        GIF_STAMP_LEN) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_READ_FAILED;
    +        /* Let's see if this is a GIF file: */
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, (unsigned char *)Buf, GIF_STAMP_LEN) !=
    +            GIF_STAMP_LEN) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_READ_FAILED;
    +                }
    +                (void)fclose(f);
    +                free((char *)Private);
    +                free((char *)GifFile);
    +                return NULL;
             }
    -        (void)fclose(f);
    -        free((char *)Private);
    -        free((char *)GifFile);
    -        return NULL;
    -    }
     
    -    /* Check for GIF prefix at start of file */
    -    Buf[GIF_STAMP_LEN] = 0;
    -    if (strncmp(GIF_STAMP, Buf, GIF_VERSION_POS) != 0) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NOT_GIF_FILE;
    +        /* Check for GIF prefix at start of file */
    +        Buf[GIF_STAMP_LEN] = 0;
    +        if (strncmp(GIF_STAMP, Buf, GIF_VERSION_POS) != 0) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NOT_GIF_FILE;
    +                }
    +                (void)fclose(f);
    +                free((char *)Private);
    +                free((char *)GifFile);
    +                return NULL;
             }
    -        (void)fclose(f);
    -        free((char *)Private);
    -        free((char *)GifFile);
    -        return NULL;
    -    }
     
    -    if (DGifGetScreenDesc(GifFile) == GIF_ERROR) {
    -        (void)fclose(f);
    -        free((char *)Private);
    -        free((char *)GifFile);
    -        return NULL;
    -    }
    +        if (DGifGetScreenDesc(GifFile) == GIF_ERROR) {
    +                (void)fclose(f);
    +                free((char *)Private);
    +                free((char *)GifFile);
    +                return NULL;
    +        }
     
    -    GifFile->Error = 0;
    +        GifFile->Error = 0;
     
    -    /* What version of GIF? */
    -    Private->gif89 = (Buf[GIF_VERSION_POS + 1] == '9');
    +        /* What version of GIF? */
    +        Private->gif89 = (Buf[GIF_VERSION_POS + 1] == '9');
     
    -    return GifFile;
    +        return GifFile;
     }
     
     /******************************************************************************
      GifFileType constructor with user supplied input function (TVT)
     ******************************************************************************/
     GifFileType *DGifOpen(void *userData, InputFunc readFunc, int *Error) {
    -    char Buf[GIF_STAMP_LEN + 1];
    -    GifFileType *GifFile;
    -    GifFilePrivateType *Private;
    +        char Buf[GIF_STAMP_LEN + 1];
    +        GifFileType *GifFile;
    +        GifFilePrivateType *Private;
     
    -    GifFile = (GifFileType *)malloc(sizeof(GifFileType));
    -    if (GifFile == NULL) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +        GifFile = (GifFileType *)malloc(sizeof(GifFileType));
    +        if (GifFile == NULL) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                }
    +                return NULL;
             }
    -        return NULL;
    -    }
     
    -    memset(GifFile, '\0', sizeof(GifFileType));
    +        memset(GifFile, '\0', sizeof(GifFileType));
     
    -    /* Belt and suspenders, in case the null pointer isn't zero */
    -    GifFile->SavedImages = NULL;
    -    GifFile->SColorMap = NULL;
    +        /* Belt and suspenders, in case the null pointer isn't zero */
    +        GifFile->SavedImages = NULL;
    +        GifFile->SColorMap = NULL;
     
    -    Private = (GifFilePrivateType *)calloc(1, sizeof(GifFilePrivateType));
    -    if (!Private) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +        Private = (GifFilePrivateType *)calloc(1, sizeof(GifFilePrivateType));
    +        if (!Private) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                }
    +                free((char *)GifFile);
    +                return NULL;
             }
    -        free((char *)GifFile);
    -        return NULL;
    -    }
    -    /*@i1@*/ memset(Private, '\0', sizeof(GifFilePrivateType));
    +        /*@i1@*/ memset(Private, '\0', sizeof(GifFilePrivateType));
     
    -    GifFile->Private = (void *)Private;
    -    Private->FileHandle = 0;
    -    Private->File = NULL;
    -    Private->FileState = FILE_STATE_READ;
    +        GifFile->Private = (void *)Private;
    +        Private->FileHandle = 0;
    +        Private->File = NULL;
    +        Private->FileState = FILE_STATE_READ;
     
    -    Private->Read = readFunc;     /* TVT */
    -    GifFile->UserData = userData; /* TVT */
    +        Private->Read = readFunc;     /* TVT */
    +        GifFile->UserData = userData; /* TVT */
     
    -    /* Lets see if this is a GIF file: */
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, (unsigned char *)Buf, GIF_STAMP_LEN) !=
    -        GIF_STAMP_LEN) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_READ_FAILED;
    +        /* Lets see if this is a GIF file: */
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, (unsigned char *)Buf, GIF_STAMP_LEN) !=
    +            GIF_STAMP_LEN) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_READ_FAILED;
    +                }
    +                free((char *)Private);
    +                free((char *)GifFile);
    +                return NULL;
             }
    -        free((char *)Private);
    -        free((char *)GifFile);
    -        return NULL;
    -    }
     
    -    /* Check for GIF prefix at start of file */
    -    Buf[GIF_STAMP_LEN] = '\0';
    -    if (strncmp(GIF_STAMP, Buf, GIF_VERSION_POS) != 0) {
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NOT_GIF_FILE;
    +        /* Check for GIF prefix at start of file */
    +        Buf[GIF_STAMP_LEN] = '\0';
    +        if (strncmp(GIF_STAMP, Buf, GIF_VERSION_POS) != 0) {
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NOT_GIF_FILE;
    +                }
    +                free((char *)Private);
    +                free((char *)GifFile);
    +                return NULL;
             }
    -        free((char *)Private);
    -        free((char *)GifFile);
    -        return NULL;
    -    }
     
    -    if (DGifGetScreenDesc(GifFile) == GIF_ERROR) {
    -        free((char *)Private);
    -        free((char *)GifFile);
    -        if (Error != NULL) {
    -            *Error = D_GIF_ERR_NO_SCRN_DSCR;
    +        if (DGifGetScreenDesc(GifFile) == GIF_ERROR) {
    +                free((char *)Private);
    +                free((char *)GifFile);
    +                if (Error != NULL) {
    +                        *Error = D_GIF_ERR_NO_SCRN_DSCR;
    +                }
    +                return NULL;
             }
    -        return NULL;
    -    }
     
    -    GifFile->Error = 0;
    +        GifFile->Error = 0;
     
    -    /* What version of GIF? */
    -    Private->gif89 = (Buf[GIF_VERSION_POS + 1] == '9');
    +        /* What version of GIF? */
    +        Private->gif89 = (Buf[GIF_VERSION_POS + 1] == '9');
     
    -    return GifFile;
    +        return GifFile;
     }
     
     /******************************************************************************
    @@ -270,180 +270,180 @@ GifFileType *DGifOpen(void *userData, InputFunc readFunc, int *Error) {
      this routine is called automatically from DGif file open routines.
     ******************************************************************************/
     int DGifGetScreenDesc(GifFileType *GifFile) {
    -    int BitsPerPixel;
    -    bool SortFlag;
    -    GifByteType Buf[3];
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        int BitsPerPixel;
    +        bool SortFlag;
    +        GifByteType Buf[3];
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    -
    -    /* Put the screen descriptor into the file: */
    -    if (DGifGetWord(GifFile, &GifFile->SWidth) == GIF_ERROR ||
    -        DGifGetWord(GifFile, &GifFile->SHeight) == GIF_ERROR) {
    -        return GIF_ERROR;
    -    }
    -
    -    if (InternalRead(GifFile, Buf, 3) != 3) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        GifFreeMapObject(GifFile->SColorMap);
    -        GifFile->SColorMap = NULL;
    -        return GIF_ERROR;
    -    }
    -    GifFile->SColorResolution = (((Buf[0] & 0x70) + 1) >> 4) + 1;
    -    SortFlag = (Buf[0] & 0x08) != 0;
    -    BitsPerPixel = (Buf[0] & 0x07) + 1;
    -    GifFile->SBackGroundColor = Buf[1];
    -    GifFile->AspectByte = Buf[2];
    -    if (Buf[0] & 0x80) { /* Do we have global color map? */
    -        int i;
    -
    -        GifFile->SColorMap = GifMakeMapObject(1 << BitsPerPixel, NULL);
    -        if (GifFile->SColorMap == NULL) {
    -            GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    -            return GIF_ERROR;
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
             }
     
    -        /* Get the global color map: */
    -        GifFile->SColorMap->SortFlag = SortFlag;
    -        for (i = 0; i < GifFile->SColorMap->ColorCount; i++) {
    -            /* coverity[check_return] */
    -            if (InternalRead(GifFile, Buf, 3) != 3) {
    +        /* Put the screen descriptor into the file: */
    +        if (DGifGetWord(GifFile, &GifFile->SWidth) == GIF_ERROR ||
    +            DGifGetWord(GifFile, &GifFile->SHeight) == GIF_ERROR) {
    +                return GIF_ERROR;
    +        }
    +
    +        if (InternalRead(GifFile, Buf, 3) != 3) {
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
                     GifFreeMapObject(GifFile->SColorMap);
                     GifFile->SColorMap = NULL;
    -                GifFile->Error = D_GIF_ERR_READ_FAILED;
                     return GIF_ERROR;
    -            }
    -            GifFile->SColorMap->Colors[i].Red = Buf[0];
    -            GifFile->SColorMap->Colors[i].Green = Buf[1];
    -            GifFile->SColorMap->Colors[i].Blue = Buf[2];
             }
    -    } else {
    -        GifFile->SColorMap = NULL;
    -    }
    +        GifFile->SColorResolution = (((Buf[0] & 0x70) + 1) >> 4) + 1;
    +        SortFlag = (Buf[0] & 0x08) != 0;
    +        BitsPerPixel = (Buf[0] & 0x07) + 1;
    +        GifFile->SBackGroundColor = Buf[1];
    +        GifFile->AspectByte = Buf[2];
    +        if (Buf[0] & 0x80) { /* Do we have global color map? */
    +                int i;
     
    -    /*
    -     * No check here for whether the background color is in range for the
    -     * screen color map.  Possibly there should be.
    -     */
    +                GifFile->SColorMap = GifMakeMapObject(1 << BitsPerPixel, NULL);
    +                if (GifFile->SColorMap == NULL) {
    +                        GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                        return GIF_ERROR;
    +                }
     
    -    return GIF_OK;
    +                /* Get the global color map: */
    +                GifFile->SColorMap->SortFlag = SortFlag;
    +                for (i = 0; i < GifFile->SColorMap->ColorCount; i++) {
    +                        /* coverity[check_return] */
    +                        if (InternalRead(GifFile, Buf, 3) != 3) {
    +                                GifFreeMapObject(GifFile->SColorMap);
    +                                GifFile->SColorMap = NULL;
    +                                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                                return GIF_ERROR;
    +                        }
    +                        GifFile->SColorMap->Colors[i].Red = Buf[0];
    +                        GifFile->SColorMap->Colors[i].Green = Buf[1];
    +                        GifFile->SColorMap->Colors[i].Blue = Buf[2];
    +                }
    +        } else {
    +                GifFile->SColorMap = NULL;
    +        }
    +
    +        /*
    +         * No check here for whether the background color is in range for the
    +         * screen color map.  Possibly there should be.
    +         */
    +
    +        return GIF_OK;
     }
     
     const char *DGifGetGifVersion(GifFileType *GifFile) {
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (Private->gif89) {
    -        return GIF89_STAMP;
    -    } else {
    -        return GIF87_STAMP;
    -    }
    +        if (Private->gif89) {
    +                return GIF89_STAMP;
    +        } else {
    +                return GIF87_STAMP;
    +        }
     }
     
     /******************************************************************************
      This routine should be called before any attempt to read an image.
     ******************************************************************************/
     int DGifGetRecordType(GifFileType *GifFile, GifRecordType *Type) {
    -    GifByteType Buf;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType Buf;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
    +        }
     
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, &Buf, 1) != 1) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        return GIF_ERROR;
    -    }
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, &Buf, 1) != 1) {
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                return GIF_ERROR;
    +        }
     
    -    // fprintf(stderr, "### DGifGetRecordType: %02x\n", Buf);
    -    switch (Buf) {
    -    case DESCRIPTOR_INTRODUCER:
    -        *Type = IMAGE_DESC_RECORD_TYPE;
    -        break;
    -    case EXTENSION_INTRODUCER:
    -        *Type = EXTENSION_RECORD_TYPE;
    -        break;
    -    case TERMINATOR_INTRODUCER:
    -        *Type = TERMINATE_RECORD_TYPE;
    -        break;
    -    default:
    -        *Type = UNDEFINED_RECORD_TYPE;
    -        GifFile->Error = D_GIF_ERR_WRONG_RECORD;
    -        return GIF_ERROR;
    -    }
    +        // fprintf(stderr, "### DGifGetRecordType: %02x\n", Buf);
    +        switch (Buf) {
    +        case DESCRIPTOR_INTRODUCER:
    +                *Type = IMAGE_DESC_RECORD_TYPE;
    +                break;
    +        case EXTENSION_INTRODUCER:
    +                *Type = EXTENSION_RECORD_TYPE;
    +                break;
    +        case TERMINATOR_INTRODUCER:
    +                *Type = TERMINATE_RECORD_TYPE;
    +                break;
    +        default:
    +                *Type = UNDEFINED_RECORD_TYPE;
    +                GifFile->Error = D_GIF_ERR_WRONG_RECORD;
    +                return GIF_ERROR;
    +        }
     
    -    return GIF_OK;
    +        return GIF_OK;
     }
     
     int DGifGetImageHeader(GifFileType *GifFile) {
    -    unsigned int BitsPerPixel;
    -    GifByteType Buf[3];
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        unsigned int BitsPerPixel;
    +        GifByteType Buf[3];
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    -
    -    if (DGifGetWord(GifFile, &GifFile->Image.Left) == GIF_ERROR ||
    -        DGifGetWord(GifFile, &GifFile->Image.Top) == GIF_ERROR ||
    -        DGifGetWord(GifFile, &GifFile->Image.Width) == GIF_ERROR ||
    -        DGifGetWord(GifFile, &GifFile->Image.Height) == GIF_ERROR) {
    -        return GIF_ERROR;
    -    }
    -    if (InternalRead(GifFile, Buf, 1) != 1) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        GifFreeMapObject(GifFile->Image.ColorMap);
    -        GifFile->Image.ColorMap = NULL;
    -        return GIF_ERROR;
    -    }
    -    BitsPerPixel = (Buf[0] & 0x07) + 1;
    -    GifFile->Image.Interlace = (Buf[0] & 0x40) ? true : false;
    -
    -    /* Setup the colormap */
    -    if (GifFile->Image.ColorMap) {
    -        GifFreeMapObject(GifFile->Image.ColorMap);
    -        GifFile->Image.ColorMap = NULL;
    -    }
    -    /* Does this image have local color map? */
    -    if (Buf[0] & 0x80) {
    -        unsigned int i;
    -
    -        GifFile->Image.ColorMap =
    -            GifMakeMapObject(1 << BitsPerPixel, NULL);
    -        if (GifFile->Image.ColorMap == NULL) {
    -            GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    -            return GIF_ERROR;
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
             }
     
    -        /* Get the image local color map: */
    -        for (i = 0; i < GifFile->Image.ColorMap->ColorCount; i++) {
    -            /* coverity[check_return] */
    -            if (InternalRead(GifFile, Buf, 3) != 3) {
    -                GifFreeMapObject(GifFile->Image.ColorMap);
    +        if (DGifGetWord(GifFile, &GifFile->Image.Left) == GIF_ERROR ||
    +            DGifGetWord(GifFile, &GifFile->Image.Top) == GIF_ERROR ||
    +            DGifGetWord(GifFile, &GifFile->Image.Width) == GIF_ERROR ||
    +            DGifGetWord(GifFile, &GifFile->Image.Height) == GIF_ERROR) {
    +                return GIF_ERROR;
    +        }
    +        if (InternalRead(GifFile, Buf, 1) != 1) {
                     GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                GifFreeMapObject(GifFile->Image.ColorMap);
                     GifFile->Image.ColorMap = NULL;
                     return GIF_ERROR;
    -            }
    -            GifFile->Image.ColorMap->Colors[i].Red = Buf[0];
    -            GifFile->Image.ColorMap->Colors[i].Green = Buf[1];
    -            GifFile->Image.ColorMap->Colors[i].Blue = Buf[2];
             }
    -    }
    +        BitsPerPixel = (Buf[0] & 0x07) + 1;
    +        GifFile->Image.Interlace = (Buf[0] & 0x40) ? true : false;
     
    -    Private->PixelCount =
    -        (long)GifFile->Image.Width * (long)GifFile->Image.Height;
    +        /* Setup the colormap */
    +        if (GifFile->Image.ColorMap) {
    +                GifFreeMapObject(GifFile->Image.ColorMap);
    +                GifFile->Image.ColorMap = NULL;
    +        }
    +        /* Does this image have local color map? */
    +        if (Buf[0] & 0x80) {
    +                unsigned int i;
     
    -    /* Reset decompress algorithm parameters. */
    -    return DGifSetupDecompress(GifFile);
    +                GifFile->Image.ColorMap =
    +                    GifMakeMapObject(1 << BitsPerPixel, NULL);
    +                if (GifFile->Image.ColorMap == NULL) {
    +                        GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                        return GIF_ERROR;
    +                }
    +
    +                /* Get the image local color map: */
    +                for (i = 0; i < GifFile->Image.ColorMap->ColorCount; i++) {
    +                        /* coverity[check_return] */
    +                        if (InternalRead(GifFile, Buf, 3) != 3) {
    +                                GifFreeMapObject(GifFile->Image.ColorMap);
    +                                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                                GifFile->Image.ColorMap = NULL;
    +                                return GIF_ERROR;
    +                        }
    +                        GifFile->Image.ColorMap->Colors[i].Red = Buf[0];
    +                        GifFile->Image.ColorMap->Colors[i].Green = Buf[1];
    +                        GifFile->Image.ColorMap->Colors[i].Blue = Buf[2];
    +                }
    +        }
    +
    +        Private->PixelCount =
    +            (long)GifFile->Image.Width * (long)GifFile->Image.Height;
    +
    +        /* Reset decompress algorithm parameters. */
    +        return DGifSetupDecompress(GifFile);
     }
     
     /******************************************************************************
    @@ -451,133 +451,135 @@ int DGifGetImageHeader(GifFileType *GifFile) {
      Note it is assumed the Image desc. header has been read.
     ******************************************************************************/
     int DGifGetImageDesc(GifFileType *GifFile) {
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    -    SavedImage *sp;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        SavedImage *sp;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    -
    -    if (DGifGetImageHeader(GifFile) == GIF_ERROR) {
    -        return GIF_ERROR;
    -    }
    -
    -    if (GifFile->SavedImages) {
    -        SavedImage *new_saved_images = (SavedImage *)reallocarray(
    -            GifFile->SavedImages, (GifFile->ImageCount + 1),
    -            sizeof(SavedImage));
    -        if (new_saved_images == NULL) {
    -            GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    -            return GIF_ERROR;
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
             }
    -        GifFile->SavedImages = new_saved_images;
    -    } else {
    -        if ((GifFile->SavedImages =
    -                 (SavedImage *)malloc(sizeof(SavedImage))) == NULL) {
    -            GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    -            return GIF_ERROR;
    +
    +        if (DGifGetImageHeader(GifFile) == GIF_ERROR) {
    +                return GIF_ERROR;
             }
    -    }
     
    -    sp = &GifFile->SavedImages[GifFile->ImageCount];
    -    memcpy(&sp->ImageDesc, &GifFile->Image, sizeof(GifImageDesc));
    -    if (GifFile->Image.ColorMap != NULL) {
    -        sp->ImageDesc.ColorMap =
    -            GifMakeMapObject(GifFile->Image.ColorMap->ColorCount,
    -                             GifFile->Image.ColorMap->Colors);
    -        if (sp->ImageDesc.ColorMap == NULL) {
    -            GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    -            return GIF_ERROR;
    +        if (GifFile->SavedImages) {
    +                SavedImage *new_saved_images = (SavedImage *)reallocarray(
    +                    GifFile->SavedImages, (GifFile->ImageCount + 1),
    +                    sizeof(SavedImage));
    +                if (new_saved_images == NULL) {
    +                        GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                        return GIF_ERROR;
    +                }
    +                GifFile->SavedImages = new_saved_images;
    +        } else {
    +                if ((GifFile->SavedImages =
    +                         (SavedImage *)malloc(sizeof(SavedImage))) == NULL) {
    +                        GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                        return GIF_ERROR;
    +                }
             }
    -    }
    -    sp->RasterBits = (unsigned char *)NULL;
    -    sp->ExtensionBlockCount = 0;
    -    sp->ExtensionBlocks = (ExtensionBlock *)NULL;
     
    -    GifFile->ImageCount++;
    +        sp = &GifFile->SavedImages[GifFile->ImageCount];
    +        memcpy(&sp->ImageDesc, &GifFile->Image, sizeof(GifImageDesc));
    +        if (GifFile->Image.ColorMap != NULL) {
    +                sp->ImageDesc.ColorMap =
    +                    GifMakeMapObject(GifFile->Image.ColorMap->ColorCount,
    +                                     GifFile->Image.ColorMap->Colors);
    +                if (sp->ImageDesc.ColorMap == NULL) {
    +                        GifFile->Error = D_GIF_ERR_NOT_ENOUGH_MEM;
    +                        return GIF_ERROR;
    +                }
    +        }
    +        sp->RasterBits = (unsigned char *)NULL;
    +        sp->ExtensionBlockCount = 0;
    +        sp->ExtensionBlocks = (ExtensionBlock *)NULL;
     
    -    return GIF_OK;
    +        GifFile->ImageCount++;
    +
    +        return GIF_OK;
     }
     
     /******************************************************************************
      Get one full scanned line (Line) of length LineLen from GIF file.
     ******************************************************************************/
     int DGifGetLine(GifFileType *GifFile, GifPixelType *Line, int LineLen) {
    -    GifByteType *Dummy;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType *Dummy;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    -
    -    if (!LineLen) {
    -        LineLen = GifFile->Image.Width;
    -    }
    -
    -    if ((Private->PixelCount -= LineLen) > 0xffff0000UL) {
    -        GifFile->Error = D_GIF_ERR_DATA_TOO_BIG;
    -        return GIF_ERROR;
    -    }
    -
    -    if (DGifDecompressLine(GifFile, Line, LineLen) == GIF_OK) {
    -        if (Private->PixelCount == 0) {
    -            /* We probably won't be called any more, so let's clean
    -             * up everything before we return: need to flush out all
    -             * the rest of image until an empty block (size 0)
    -             * detected. We use GetCodeNext.
    -             */
    -            do {
    -                if (DGifGetCodeNext(GifFile, &Dummy) ==
    -                    GIF_ERROR) {
    -                    return GIF_ERROR;
    -                }
    -            } while (Dummy != NULL);
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
    +        }
    +
    +        if (!LineLen) {
    +                LineLen = GifFile->Image.Width;
    +        }
    +
    +        if (LineLen < 0 || Private->PixelCount < (unsigned long)LineLen) {
    +                GifFile->Error = D_GIF_ERR_DATA_TOO_BIG;
    +                return GIF_ERROR;
    +        }
    +        Private->PixelCount -= LineLen;
    +
    +        if (DGifDecompressLine(GifFile, Line, LineLen) == GIF_OK) {
    +                if (Private->PixelCount == 0) {
    +                        /* We probably won't be called any more, so let's clean
    +                         * up everything before we return: need to flush out all
    +                         * the rest of image until an empty block (size 0)
    +                         * detected. We use GetCodeNext.
    +                         */
    +                        do {
    +                                if (DGifGetCodeNext(GifFile, &Dummy) ==
    +                                    GIF_ERROR) {
    +                                        return GIF_ERROR;
    +                                }
    +                        } while (Dummy != NULL);
    +                }
    +                return GIF_OK;
    +        } else {
    +                return GIF_ERROR;
             }
    -        return GIF_OK;
    -    } else {
    -        return GIF_ERROR;
    -    }
     }
     
     /******************************************************************************
      Put one pixel (Pixel) into GIF file.
     ******************************************************************************/
     int DGifGetPixel(GifFileType *GifFile, GifPixelType Pixel) {
    -    GifByteType *Dummy;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType *Dummy;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    -    if (--Private->PixelCount > 0xffff0000UL) {
    -        GifFile->Error = D_GIF_ERR_DATA_TOO_BIG;
    -        return GIF_ERROR;
    -    }
    -
    -    if (DGifDecompressLine(GifFile, &Pixel, 1) == GIF_OK) {
    -        if (Private->PixelCount == 0) {
    -            /* We probably won't be called any more, so let's clean
    -             * up everything before we return: need to flush out all
    -             * the rest of image until an empty block (size 0)
    -             * detected. We use GetCodeNext.
    -             */
    -            do {
    -                if (DGifGetCodeNext(GifFile, &Dummy) ==
    -                    GIF_ERROR) {
    -                    return GIF_ERROR;
    -                }
    -            } while (Dummy != NULL);
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
    +        }
    +        if (Private->PixelCount == 0) {
    +                GifFile->Error = D_GIF_ERR_DATA_TOO_BIG;
    +                return GIF_ERROR;
    +        }
    +        Private->PixelCount --;
    +
    +        if (DGifDecompressLine(GifFile, &Pixel, 1) == GIF_OK) {
    +                if (Private->PixelCount == 0) {
    +                        /* We probably won't be called any more, so let's clean
    +                         * up everything before we return: need to flush out all
    +                         * the rest of image until an empty block (size 0)
    +                         * detected. We use GetCodeNext.
    +                         */
    +                        do {
    +                                if (DGifGetCodeNext(GifFile, &Dummy) ==
    +                                    GIF_ERROR) {
    +                                        return GIF_ERROR;
    +                                }
    +                        } while (Dummy != NULL);
    +                }
    +                return GIF_OK;
    +        } else {
    +                return GIF_ERROR;
             }
    -        return GIF_OK;
    -    } else {
    -        return GIF_ERROR;
    -    }
     }
     
     /******************************************************************************
    @@ -589,26 +591,26 @@ int DGifGetPixel(GifFileType *GifFile, GifPixelType Pixel) {
     ******************************************************************************/
     int DGifGetExtension(GifFileType *GifFile, int *ExtCode,
                          GifByteType **Extension) {
    -    GifByteType Buf;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType Buf;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    // fprintf(stderr, "### -> DGifGetExtension:\n");
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    +        // fprintf(stderr, "### -> DGifGetExtension:\n");
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
    +        }
     
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, &Buf, 1) != 1) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        return GIF_ERROR;
    -    }
    -    *ExtCode = Buf;
    -    // fprintf(stderr, "### <- DGifGetExtension: %02x, about to call
    -    // next\n", Buf);
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, &Buf, 1) != 1) {
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                return GIF_ERROR;
    +        }
    +        *ExtCode = Buf;
    +        // fprintf(stderr, "### <- DGifGetExtension: %02x, about to call
    +        // next\n", Buf);
     
    -    return DGifGetExtensionNext(GifFile, Extension);
    +        return DGifGetExtensionNext(GifFile, Extension);
     }
     
     /******************************************************************************
    @@ -617,31 +619,31 @@ int DGifGetExtension(GifFileType *GifFile, int *ExtCode,
      The Extension should NOT be freed by the user (not dynamically allocated).
     ******************************************************************************/
     int DGifGetExtensionNext(GifFileType *GifFile, GifByteType **Extension) {
    -    GifByteType Buf;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType Buf;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    // fprintf(stderr, "### -> DGifGetExtensionNext\n");
    -    if (InternalRead(GifFile, &Buf, 1) != 1) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        return GIF_ERROR;
    -    }
    -    // fprintf(stderr, "### DGifGetExtensionNext sees %d\n", Buf);
    -
    -    if (Buf > 0) {
    -        *Extension = Private->Buf; /* Use private unused buffer. */
    -        (*Extension)[0] =
    -            Buf; /* Pascal strings notation (pos. 0 is len.). */
    -                 /* coverity[tainted_data,check_return] */
    -        if (InternalRead(GifFile, &((*Extension)[1]), Buf) != Buf) {
    -            GifFile->Error = D_GIF_ERR_READ_FAILED;
    -            return GIF_ERROR;
    +        // fprintf(stderr, "### -> DGifGetExtensionNext\n");
    +        if (InternalRead(GifFile, &Buf, 1) != 1) {
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                return GIF_ERROR;
             }
    -    } else {
    -        *Extension = NULL;
    -    }
    -    // fprintf(stderr, "### <- DGifGetExtensionNext: %p\n", Extension);
    +        // fprintf(stderr, "### DGifGetExtensionNext sees %d\n", Buf);
     
    -    return GIF_OK;
    +        if (Buf > 0) {
    +                *Extension = Private->Buf; /* Use private unused buffer. */
    +                (*Extension)[0] =
    +                    Buf; /* Pascal strings notation (pos. 0 is len.). */
    +                         /* coverity[tainted_data,check_return] */
    +                if (InternalRead(GifFile, &((*Extension)[1]), Buf) != Buf) {
    +                        GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                        return GIF_ERROR;
    +                }
    +        } else {
    +                *Extension = NULL;
    +        }
    +        // fprintf(stderr, "### <- DGifGetExtensionNext: %p\n", Extension);
    +
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -651,21 +653,21 @@ int DGifGetExtensionNext(GifFileType *GifFile, GifByteType **Extension) {
     int DGifExtensionToGCB(const size_t GifExtensionLength,
                            const GifByteType *GifExtension,
                            GraphicsControlBlock *GCB) {
    -    if (GifExtensionLength != 4) {
    -        return GIF_ERROR;
    -    }
    +        if (GifExtensionLength != 4) {
    +                return GIF_ERROR;
    +        }
     
    -    GCB->DisposalMode = (GifExtension[0] >> 2) & 0x07;
    -    GCB->UserInputFlag = (GifExtension[0] & 0x02) != 0;
    -    GCB->DelayTime =
    -        UNSIGNED_LITTLE_ENDIAN(GifExtension[1], GifExtension[2]);
    -    if (GifExtension[0] & 0x01) {
    -        GCB->TransparentColor = (int)GifExtension[3];
    -    } else {
    -        GCB->TransparentColor = NO_TRANSPARENT_COLOR;
    -    }
    +        GCB->DisposalMode = (GifExtension[0] >> 2) & 0x07;
    +        GCB->UserInputFlag = (GifExtension[0] & 0x02) != 0;
    +        GCB->DelayTime =
    +            UNSIGNED_LITTLE_ENDIAN(GifExtension[1], GifExtension[2]);
    +        if (GifExtension[0] & 0x01) {
    +                GCB->TransparentColor = (int)GifExtension[3];
    +        } else {
    +                GCB->TransparentColor = NO_TRANSPARENT_COLOR;
    +        }
     
    -    return GIF_OK;
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -674,101 +676,101 @@ int DGifExtensionToGCB(const size_t GifExtensionLength,
     
     int DGifSavedExtensionToGCB(GifFileType *GifFile, int ImageIndex,
                                 GraphicsControlBlock *GCB) {
    -    int i;
    +        int i;
     
    -    if (ImageIndex < 0 || ImageIndex > GifFile->ImageCount - 1) {
    -        return GIF_ERROR;
    -    }
    -
    -    GCB->DisposalMode = DISPOSAL_UNSPECIFIED;
    -    GCB->UserInputFlag = false;
    -    GCB->DelayTime = 0;
    -    GCB->TransparentColor = NO_TRANSPARENT_COLOR;
    -
    -    for (i = 0; i < GifFile->SavedImages[ImageIndex].ExtensionBlockCount;
    -         i++) {
    -        ExtensionBlock *ep =
    -            &GifFile->SavedImages[ImageIndex].ExtensionBlocks[i];
    -        if (ep->Function == GRAPHICS_EXT_FUNC_CODE) {
    -            return DGifExtensionToGCB(ep->ByteCount, ep->Bytes,
    -                                      GCB);
    +        if (ImageIndex < 0 || ImageIndex > GifFile->ImageCount - 1) {
    +                return GIF_ERROR;
             }
    -    }
     
    -    return GIF_ERROR;
    +        GCB->DisposalMode = DISPOSAL_UNSPECIFIED;
    +        GCB->UserInputFlag = false;
    +        GCB->DelayTime = 0;
    +        GCB->TransparentColor = NO_TRANSPARENT_COLOR;
    +
    +        for (i = 0; i < GifFile->SavedImages[ImageIndex].ExtensionBlockCount;
    +             i++) {
    +                ExtensionBlock *ep =
    +                    &GifFile->SavedImages[ImageIndex].ExtensionBlocks[i];
    +                if (ep->Function == GRAPHICS_EXT_FUNC_CODE) {
    +                        return DGifExtensionToGCB(ep->ByteCount, ep->Bytes,
    +                                                  GCB);
    +                }
    +        }
    +
    +        return GIF_ERROR;
     }
     
     /******************************************************************************
      This routine should be called last, to close the GIF file.
     ******************************************************************************/
     int DGifCloseFile(GifFileType *GifFile, int *ErrorCode) {
    -    GifFilePrivateType *Private;
    +        GifFilePrivateType *Private;
     
    -    if (GifFile == NULL || GifFile->Private == NULL) {
    -        return GIF_ERROR;
    -    }
    -
    -    if (GifFile->Image.ColorMap) {
    -        GifFreeMapObject(GifFile->Image.ColorMap);
    -        GifFile->Image.ColorMap = NULL;
    -    }
    -
    -    if (GifFile->SColorMap) {
    -        GifFreeMapObject(GifFile->SColorMap);
    -        GifFile->SColorMap = NULL;
    -    }
    -
    -    if (GifFile->SavedImages) {
    -        GifFreeSavedImages(GifFile);
    -        GifFile->SavedImages = NULL;
    -    }
    -
    -    GifFreeExtensions(&GifFile->ExtensionBlockCount,
    -                      &GifFile->ExtensionBlocks);
    -
    -    Private = (GifFilePrivateType *)GifFile->Private;
    -
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        if (ErrorCode != NULL) {
    -            *ErrorCode = D_GIF_ERR_NOT_READABLE;
    +        if (GifFile == NULL || GifFile->Private == NULL) {
    +                return GIF_ERROR;
             }
    +
    +        if (GifFile->Image.ColorMap) {
    +                GifFreeMapObject(GifFile->Image.ColorMap);
    +                GifFile->Image.ColorMap = NULL;
    +        }
    +
    +        if (GifFile->SColorMap) {
    +                GifFreeMapObject(GifFile->SColorMap);
    +                GifFile->SColorMap = NULL;
    +        }
    +
    +        if (GifFile->SavedImages) {
    +                GifFreeSavedImages(GifFile);
    +                GifFile->SavedImages = NULL;
    +        }
    +
    +        GifFreeExtensions(&GifFile->ExtensionBlockCount,
    +                          &GifFile->ExtensionBlocks);
    +
    +        Private = (GifFilePrivateType *)GifFile->Private;
    +
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                if (ErrorCode != NULL) {
    +                        *ErrorCode = D_GIF_ERR_NOT_READABLE;
    +                }
    +                free((char *)GifFile->Private);
    +                free(GifFile);
    +                return GIF_ERROR;
    +        }
    +
    +        if (Private->File && (fclose(Private->File) != 0)) {
    +                if (ErrorCode != NULL) {
    +                        *ErrorCode = D_GIF_ERR_CLOSE_FAILED;
    +                }
    +                free((char *)GifFile->Private);
    +                free(GifFile);
    +                return GIF_ERROR;
    +        }
    +
             free((char *)GifFile->Private);
             free(GifFile);
    -        return GIF_ERROR;
    -    }
    -
    -    if (Private->File && (fclose(Private->File) != 0)) {
             if (ErrorCode != NULL) {
    -            *ErrorCode = D_GIF_ERR_CLOSE_FAILED;
    +                *ErrorCode = D_GIF_SUCCEEDED;
             }
    -        free((char *)GifFile->Private);
    -        free(GifFile);
    -        return GIF_ERROR;
    -    }
    -
    -    free((char *)GifFile->Private);
    -    free(GifFile);
    -    if (ErrorCode != NULL) {
    -        *ErrorCode = D_GIF_SUCCEEDED;
    -    }
    -    return GIF_OK;
    +        return GIF_OK;
     }
     
     /******************************************************************************
      Get 2 bytes (word) from the given file:
     ******************************************************************************/
     static int DGifGetWord(GifFileType *GifFile, GifWord *Word) {
    -    unsigned char c[2];
    +        unsigned char c[2];
     
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, c, 2) != 2) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        return GIF_ERROR;
    -    }
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, c, 2) != 2) {
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                return GIF_ERROR;
    +        }
     
    -    *Word = (GifWord)UNSIGNED_LITTLE_ENDIAN(c[0], c[1]);
    -    return GIF_OK;
    +        *Word = (GifWord)UNSIGNED_LITTLE_ENDIAN(c[0], c[1]);
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -779,17 +781,17 @@ static int DGifGetWord(GifFileType *GifFile, GifWord *Word) {
      The block should NOT be freed by the user (not dynamically allocated).
     ******************************************************************************/
     int DGifGetCode(GifFileType *GifFile, int *CodeSize, GifByteType **CodeBlock) {
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
    +                return GIF_ERROR;
    +        }
     
    -    *CodeSize = Private->BitsPerPixel;
    +        *CodeSize = Private->BitsPerPixel;
     
    -    return DGifGetCodeNext(GifFile, CodeBlock);
    +        return DGifGetCodeNext(GifFile, CodeBlock);
     }
     
     /******************************************************************************
    @@ -798,78 +800,78 @@ int DGifGetCode(GifFileType *GifFile, int *CodeSize, GifByteType **CodeBlock) {
      The block should NOT be freed by the user (not dynamically allocated).
     ******************************************************************************/
     int DGifGetCodeNext(GifFileType *GifFile, GifByteType **CodeBlock) {
    -    GifByteType Buf;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType Buf;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    /* coverity[tainted_data_argument] */
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, &Buf, 1) != 1) {
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        return GIF_ERROR;
    -    }
    -
    -    /* coverity[lower_bounds] */
    -    if (Buf > 0) {
    -        *CodeBlock = Private->Buf; /* Use private unused buffer. */
    -        (*CodeBlock)[0] =
    -            Buf; /* Pascal strings notation (pos. 0 is len.). */
    -                 /* coverity[tainted_data] */
    -        if (InternalRead(GifFile, &((*CodeBlock)[1]), Buf) != Buf) {
    -            GifFile->Error = D_GIF_ERR_READ_FAILED;
    -            return GIF_ERROR;
    +        /* coverity[tainted_data_argument] */
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, &Buf, 1) != 1) {
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                return GIF_ERROR;
             }
    -    } else {
    -        *CodeBlock = NULL;
    -        Private->Buf[0] = 0; /* Make sure the buffer is empty! */
    -        Private->PixelCount =
    -            0; /* And local info. indicate image read. */
    -    }
     
    -    return GIF_OK;
    +        /* coverity[lower_bounds] */
    +        if (Buf > 0) {
    +                *CodeBlock = Private->Buf; /* Use private unused buffer. */
    +                (*CodeBlock)[0] =
    +                    Buf; /* Pascal strings notation (pos. 0 is len.). */
    +                         /* coverity[tainted_data] */
    +                if (InternalRead(GifFile, &((*CodeBlock)[1]), Buf) != Buf) {
    +                        GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                        return GIF_ERROR;
    +                }
    +        } else {
    +                *CodeBlock = NULL;
    +                Private->Buf[0] = 0; /* Make sure the buffer is empty! */
    +                Private->PixelCount =
    +                    0; /* And local info. indicate image read. */
    +        }
    +
    +        return GIF_OK;
     }
     
     /******************************************************************************
      Setup the LZ decompression for this image:
     ******************************************************************************/
     static int DGifSetupDecompress(GifFileType *GifFile) {
    -    int i, BitsPerPixel;
    -    GifByteType CodeSize;
    -    GifPrefixType *Prefix;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        int i, BitsPerPixel;
    +        GifByteType CodeSize;
    +        GifPrefixType *Prefix;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    /* coverity[check_return] */
    -    if (InternalRead(GifFile, &CodeSize, 1) <
    -        1) { /* Read Code size from file. */
    -        GifFile->Error = D_GIF_ERR_READ_FAILED;
    -        return GIF_ERROR; /* Failed to read Code size. */
    -    }
    -    BitsPerPixel = CodeSize;
    +        /* coverity[check_return] */
    +        if (InternalRead(GifFile, &CodeSize, 1) <
    +            1) { /* Read Code size from file. */
    +                GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                return GIF_ERROR; /* Failed to read Code size. */
    +        }
    +        BitsPerPixel = CodeSize;
     
    -    /* this can only happen on a severely malformed GIF */
    -    if (BitsPerPixel > 8) {
    -        GifFile->Error =
    -            D_GIF_ERR_READ_FAILED; /* somewhat bogus error code */
    -        return GIF_ERROR;          /* Failed to read Code size. */
    -    }
    +        /* this can only happen on a severely malformed GIF */
    +        if (BitsPerPixel > 8) {
    +                GifFile->Error =
    +                    D_GIF_ERR_READ_FAILED; /* somewhat bogus error code */
    +                return GIF_ERROR;          /* Failed to read Code size. */
    +        }
     
    -    Private->Buf[0] = 0; /* Input Buffer empty. */
    -    Private->BitsPerPixel = BitsPerPixel;
    -    Private->ClearCode = (1 << BitsPerPixel);
    -    Private->EOFCode = Private->ClearCode + 1;
    -    Private->RunningCode = Private->EOFCode + 1;
    -    Private->RunningBits = BitsPerPixel + 1; /* Number of bits per code. */
    -    Private->MaxCode1 = 1 << Private->RunningBits; /* Max. code + 1. */
    -    Private->StackPtr = 0; /* No pixels on the pixel stack. */
    -    Private->LastCode = NO_SUCH_CODE;
    -    Private->CrntShiftState = 0; /* No information in CrntShiftDWord. */
    -    Private->CrntShiftDWord = 0;
    +        Private->Buf[0] = 0; /* Input Buffer empty. */
    +        Private->BitsPerPixel = BitsPerPixel;
    +        Private->ClearCode = (1 << BitsPerPixel);
    +        Private->EOFCode = Private->ClearCode + 1;
    +        Private->RunningCode = Private->EOFCode + 1;
    +        Private->RunningBits = BitsPerPixel + 1; /* Number of bits per code. */
    +        Private->MaxCode1 = 1 << Private->RunningBits; /* Max. code + 1. */
    +        Private->StackPtr = 0; /* No pixels on the pixel stack. */
    +        Private->LastCode = NO_SUCH_CODE;
    +        Private->CrntShiftState = 0; /* No information in CrntShiftDWord. */
    +        Private->CrntShiftDWord = 0;
     
    -    Prefix = Private->Prefix;
    -    for (i = 0; i <= LZ_MAX_CODE; i++) {
    -        Prefix[i] = NO_SUCH_CODE;
    -    }
    +        Prefix = Private->Prefix;
    +        for (i = 0; i <= LZ_MAX_CODE; i++) {
    +                Prefix[i] = NO_SUCH_CODE;
    +        }
     
    -    return GIF_OK;
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -880,147 +882,147 @@ static int DGifSetupDecompress(GifFileType *GifFile) {
     ******************************************************************************/
     static int DGifDecompressLine(GifFileType *GifFile, GifPixelType *Line,
                                   int LineLen) {
    -    int i = 0;
    -    int j, CrntCode, EOFCode, ClearCode, CrntPrefix, LastCode, StackPtr;
    -    GifByteType *Stack, *Suffix;
    -    GifPrefixType *Prefix;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        int i = 0;
    +        int j, CrntCode, EOFCode, ClearCode, CrntPrefix, LastCode, StackPtr;
    +        GifByteType *Stack, *Suffix;
    +        GifPrefixType *Prefix;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    StackPtr = Private->StackPtr;
    -    Prefix = Private->Prefix;
    -    Suffix = Private->Suffix;
    -    Stack = Private->Stack;
    -    EOFCode = Private->EOFCode;
    -    ClearCode = Private->ClearCode;
    -    LastCode = Private->LastCode;
    +        StackPtr = Private->StackPtr;
    +        Prefix = Private->Prefix;
    +        Suffix = Private->Suffix;
    +        Stack = Private->Stack;
    +        EOFCode = Private->EOFCode;
    +        ClearCode = Private->ClearCode;
    +        LastCode = Private->LastCode;
     
    -    if (StackPtr > LZ_MAX_CODE) {
    -        return GIF_ERROR;
    -    }
    -
    -    if (StackPtr != 0) {
    -        /* Let pop the stack off before continueing to read the GIF
    -         * file: */
    -        while (StackPtr != 0 && i < LineLen) {
    -            Line[i++] = Stack[--StackPtr];
    -        }
    -    }
    -
    -    while (i < LineLen) { /* Decode LineLen items. */
    -        if (DGifDecompressInput(GifFile, &CrntCode) == GIF_ERROR) {
    -            return GIF_ERROR;
    +        if (StackPtr > LZ_MAX_CODE) {
    +                return GIF_ERROR;
             }
     
    -        if (CrntCode == EOFCode) {
    -            /* Note however that usually we will not be here as we
    -             * will stop decoding as soon as we got all the pixel,
    -             * or EOF code will not be read at all, and
    -             * DGifGetLine/Pixel clean everything.  */
    -            GifFile->Error = D_GIF_ERR_EOF_TOO_SOON;
    -            return GIF_ERROR;
    -        } else if (CrntCode == ClearCode) {
    -            /* We need to start over again: */
    -            for (j = 0; j <= LZ_MAX_CODE; j++) {
    -                Prefix[j] = NO_SUCH_CODE;
    -            }
    -            Private->RunningCode = Private->EOFCode + 1;
    -            Private->RunningBits = Private->BitsPerPixel + 1;
    -            Private->MaxCode1 = 1 << Private->RunningBits;
    -            LastCode = Private->LastCode = NO_SUCH_CODE;
    -        } else {
    -            /* Its regular code - if in pixel range simply add it to
    -             * output stream, otherwise trace to codes linked list
    -             * until the prefix is in pixel range: */
    -            if (CrntCode < ClearCode) {
    -                /* This is simple - its pixel scalar, so add it
    -                 * to output: */
    -                Line[i++] = CrntCode;
    -            } else {
    -                /* Its a code to needed to be traced: trace the
    -                 * linked list until the prefix is a pixel,
    -                 * while pushing the suffix pixels on our stack.
    -                 * If we done, pop the stack in reverse (thats
    -                 * what stack is good for!) order to output.  */
    -                if (Prefix[CrntCode] == NO_SUCH_CODE) {
    -                    CrntPrefix = LastCode;
    -
    -                    /* Only allowed if CrntCode is exactly
    -                     * the running code: In that case
    -                     * CrntCode = XXXCode, CrntCode or the
    -                     * prefix code is last code and the
    -                     * suffix char is exactly the prefix of
    -                     * last code! */
    -                    if (CrntCode ==
    -                        Private->RunningCode - 2) {
    -                        Suffix[Private->RunningCode -
    -                               2] = Stack[StackPtr++] =
    -                            DGifGetPrefixChar(
    -                                Prefix, LastCode,
    -                                ClearCode);
    -                    } else {
    -                        Suffix[Private->RunningCode -
    -                               2] = Stack[StackPtr++] =
    -                            DGifGetPrefixChar(
    -                                Prefix, CrntCode,
    -                                ClearCode);
    -                    }
    -                } else {
    -                    CrntPrefix = CrntCode;
    -                }
    -
    -                /* Now (if image is O.K.) we should not get a
    -                 * NO_SUCH_CODE during the trace. As we might
    -                 * loop forever, in case of defective image, we
    -                 * use StackPtr as loop counter and stop before
    -                 * overflowing Stack[]. */
    -                while (StackPtr < LZ_MAX_CODE &&
    -                       CrntPrefix > ClearCode &&
    -                       CrntPrefix <= LZ_MAX_CODE) {
    -                    Stack[StackPtr++] = Suffix[CrntPrefix];
    -                    CrntPrefix = Prefix[CrntPrefix];
    -                }
    -                if (StackPtr >= LZ_MAX_CODE ||
    -                    CrntPrefix > LZ_MAX_CODE) {
    -                    GifFile->Error = D_GIF_ERR_IMAGE_DEFECT;
    -                    return GIF_ERROR;
    -                }
    -                /* Push the last character on stack: */
    -                Stack[StackPtr++] = CrntPrefix;
    -
    -                /* Now lets pop all the stack into output: */
    +        if (StackPtr != 0) {
    +                /* Let pop the stack off before continueing to read the GIF
    +                 * file: */
                     while (StackPtr != 0 && i < LineLen) {
    -                    Line[i++] = Stack[--StackPtr];
    +                        Line[i++] = Stack[--StackPtr];
                     }
    -            }
    -            if (LastCode != NO_SUCH_CODE &&
    -                Private->RunningCode - 2 < (LZ_MAX_CODE + 1) &&
    -                Prefix[Private->RunningCode - 2] == NO_SUCH_CODE) {
    -                Prefix[Private->RunningCode - 2] = LastCode;
    -
    -                if (CrntCode == Private->RunningCode - 2) {
    -                    /* Only allowed if CrntCode is exactly
    -                     * the running code: In that case
    -                     * CrntCode = XXXCode, CrntCode or the
    -                     * prefix code is last code and the
    -                     * suffix char is exactly the prefix of
    -                     * last code! */
    -                    Suffix[Private->RunningCode - 2] =
    -                        DGifGetPrefixChar(Prefix, LastCode,
    -                                          ClearCode);
    -                } else {
    -                    Suffix[Private->RunningCode - 2] =
    -                        DGifGetPrefixChar(Prefix, CrntCode,
    -                                          ClearCode);
    -                }
    -            }
    -            LastCode = CrntCode;
             }
    -    }
     
    -    Private->LastCode = LastCode;
    -    Private->StackPtr = StackPtr;
    +        while (i < LineLen) { /* Decode LineLen items. */
    +                if (DGifDecompressInput(GifFile, &CrntCode) == GIF_ERROR) {
    +                        return GIF_ERROR;
    +                }
     
    -    return GIF_OK;
    +                if (CrntCode == EOFCode) {
    +                        /* Note however that usually we will not be here as we
    +                         * will stop decoding as soon as we got all the pixel,
    +                         * or EOF code will not be read at all, and
    +                         * DGifGetLine/Pixel clean everything.  */
    +                        GifFile->Error = D_GIF_ERR_EOF_TOO_SOON;
    +                        return GIF_ERROR;
    +                } else if (CrntCode == ClearCode) {
    +                        /* We need to start over again: */
    +                        for (j = 0; j <= LZ_MAX_CODE; j++) {
    +                                Prefix[j] = NO_SUCH_CODE;
    +                        }
    +                        Private->RunningCode = Private->EOFCode + 1;
    +                        Private->RunningBits = Private->BitsPerPixel + 1;
    +                        Private->MaxCode1 = 1 << Private->RunningBits;
    +                        LastCode = Private->LastCode = NO_SUCH_CODE;
    +                } else {
    +                        /* Its regular code - if in pixel range simply add it to
    +                         * output stream, otherwise trace to codes linked list
    +                         * until the prefix is in pixel range: */
    +                        if (CrntCode < ClearCode) {
    +                                /* This is simple - its pixel scalar, so add it
    +                                 * to output: */
    +                                Line[i++] = CrntCode;
    +                        } else {
    +                                /* Its a code to needed to be traced: trace the
    +                                 * linked list until the prefix is a pixel,
    +                                 * while pushing the suffix pixels on our stack.
    +                                 * If we done, pop the stack in reverse (thats
    +                                 * what stack is good for!) order to output.  */
    +                                if (Prefix[CrntCode] == NO_SUCH_CODE) {
    +                                        CrntPrefix = LastCode;
    +
    +                                        /* Only allowed if CrntCode is exactly
    +                                         * the running code: In that case
    +                                         * CrntCode = XXXCode, CrntCode or the
    +                                         * prefix code is last code and the
    +                                         * suffix char is exactly the prefix of
    +                                         * last code! */
    +                                        if (CrntCode ==
    +                                            Private->RunningCode - 2) {
    +                                                Suffix[Private->RunningCode -
    +                                                       2] = Stack[StackPtr++] =
    +                                                    DGifGetPrefixChar(
    +                                                        Prefix, LastCode,
    +                                                        ClearCode);
    +                                        } else {
    +                                                Suffix[Private->RunningCode -
    +                                                       2] = Stack[StackPtr++] =
    +                                                    DGifGetPrefixChar(
    +                                                        Prefix, CrntCode,
    +                                                        ClearCode);
    +                                        }
    +                                } else {
    +                                        CrntPrefix = CrntCode;
    +                                }
    +
    +                                /* Now (if image is O.K.) we should not get a
    +                                 * NO_SUCH_CODE during the trace. As we might
    +                                 * loop forever, in case of defective image, we
    +                                 * use StackPtr as loop counter and stop before
    +                                 * overflowing Stack[]. */
    +                                while (StackPtr < LZ_MAX_CODE &&
    +                                       CrntPrefix > ClearCode &&
    +                                       CrntPrefix <= LZ_MAX_CODE) {
    +                                        Stack[StackPtr++] = Suffix[CrntPrefix];
    +                                        CrntPrefix = Prefix[CrntPrefix];
    +                                }
    +                                if (StackPtr >= LZ_MAX_CODE ||
    +                                    CrntPrefix > LZ_MAX_CODE) {
    +                                        GifFile->Error = D_GIF_ERR_IMAGE_DEFECT;
    +                                        return GIF_ERROR;
    +                                }
    +                                /* Push the last character on stack: */
    +                                Stack[StackPtr++] = CrntPrefix;
    +
    +                                /* Now lets pop all the stack into output: */
    +                                while (StackPtr != 0 && i < LineLen) {
    +                                        Line[i++] = Stack[--StackPtr];
    +                                }
    +                        }
    +                        if (LastCode != NO_SUCH_CODE &&
    +                            Private->RunningCode - 2 < (LZ_MAX_CODE + 1) &&
    +                            Prefix[Private->RunningCode - 2] == NO_SUCH_CODE) {
    +                                Prefix[Private->RunningCode - 2] = LastCode;
    +
    +                                if (CrntCode == Private->RunningCode - 2) {
    +                                        /* Only allowed if CrntCode is exactly
    +                                         * the running code: In that case
    +                                         * CrntCode = XXXCode, CrntCode or the
    +                                         * prefix code is last code and the
    +                                         * suffix char is exactly the prefix of
    +                                         * last code! */
    +                                        Suffix[Private->RunningCode - 2] =
    +                                            DGifGetPrefixChar(Prefix, LastCode,
    +                                                              ClearCode);
    +                                } else {
    +                                        Suffix[Private->RunningCode - 2] =
    +                                            DGifGetPrefixChar(Prefix, CrntCode,
    +                                                              ClearCode);
    +                                }
    +                        }
    +                        LastCode = CrntCode;
    +                }
    +        }
    +
    +        Private->LastCode = LastCode;
    +        Private->StackPtr = StackPtr;
    +
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -1031,15 +1033,15 @@ static int DGifDecompressLine(GifFileType *GifFile, GifPixelType *Line,
     ******************************************************************************/
     static int DGifGetPrefixChar(const GifPrefixType *Prefix, int Code,
                                  int ClearCode) {
    -    int i = 0;
    +        int i = 0;
     
    -    while (Code > ClearCode && i++ <= LZ_MAX_CODE) {
    -        if (Code > LZ_MAX_CODE) {
    -            return NO_SUCH_CODE;
    +        while (Code > ClearCode && i++ <= LZ_MAX_CODE) {
    +                if (Code > LZ_MAX_CODE) {
    +                        return NO_SUCH_CODE;
    +                }
    +                Code = Prefix[Code];
             }
    -        Code = Prefix[Code];
    -    }
    -    return Code;
    +        return Code;
     }
     
     /******************************************************************************
    @@ -1047,37 +1049,37 @@ static int DGifGetPrefixChar(const GifPrefixType *Prefix, int Code,
      (12bits), or to -1 if EOF code is returned.
     ******************************************************************************/
     int DGifGetLZCodes(GifFileType *GifFile, int *Code) {
    -    GifByteType *CodeBlock;
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifByteType *CodeBlock;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    if (!IS_READABLE(Private)) {
    -        /* This file was NOT open for reading: */
    -        GifFile->Error = D_GIF_ERR_NOT_READABLE;
    -        return GIF_ERROR;
    -    }
    -
    -    if (DGifDecompressInput(GifFile, Code) == GIF_ERROR) {
    -        return GIF_ERROR;
    -    }
    -
    -    if (*Code == Private->EOFCode) {
    -        /* Skip rest of codes (hopefully only NULL terminating block):
    -         */
    -        do {
    -            if (DGifGetCodeNext(GifFile, &CodeBlock) == GIF_ERROR) {
    +        if (!IS_READABLE(Private)) {
    +                /* This file was NOT open for reading: */
    +                GifFile->Error = D_GIF_ERR_NOT_READABLE;
                     return GIF_ERROR;
    -            }
    -        } while (CodeBlock != NULL);
    +        }
     
    -        *Code = -1;
    -    } else if (*Code == Private->ClearCode) {
    -        /* We need to start over again: */
    -        Private->RunningCode = Private->EOFCode + 1;
    -        Private->RunningBits = Private->BitsPerPixel + 1;
    -        Private->MaxCode1 = 1 << Private->RunningBits;
    -    }
    +        if (DGifDecompressInput(GifFile, Code) == GIF_ERROR) {
    +                return GIF_ERROR;
    +        }
     
    -    return GIF_OK;
    +        if (*Code == Private->EOFCode) {
    +                /* Skip rest of codes (hopefully only NULL terminating block):
    +                 */
    +                do {
    +                        if (DGifGetCodeNext(GifFile, &CodeBlock) == GIF_ERROR) {
    +                                return GIF_ERROR;
    +                        }
    +                } while (CodeBlock != NULL);
    +
    +                *Code = -1;
    +        } else if (*Code == Private->ClearCode) {
    +                /* We need to start over again: */
    +                Private->RunningCode = Private->EOFCode + 1;
    +                Private->RunningBits = Private->BitsPerPixel + 1;
    +                Private->MaxCode1 = 1 << Private->RunningBits;
    +        }
    +
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -1087,47 +1089,47 @@ int DGifGetLZCodes(GifFileType *GifFile, int *Code) {
      Returns GIF_OK if read successfully.
     ******************************************************************************/
     static int DGifDecompressInput(GifFileType *GifFile, int *Code) {
    -    static const unsigned short CodeMasks[] = {
    -        0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f,
    -        0x007f, 0x00ff, 0x01ff, 0x03ff, 0x07ff, 0x0fff};
    +        static const unsigned short CodeMasks[] = {
    +            0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f,
    +            0x007f, 0x00ff, 0x01ff, 0x03ff, 0x07ff, 0x0fff};
     
    -    GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
    +        GifFilePrivateType *Private = (GifFilePrivateType *)GifFile->Private;
     
    -    GifByteType NextByte;
    +        GifByteType NextByte;
     
    -    /* The image can't contain more than LZ_BITS per code. */
    -    if (Private->RunningBits > LZ_BITS) {
    -        GifFile->Error = D_GIF_ERR_IMAGE_DEFECT;
    -        return GIF_ERROR;
    -    }
    -
    -    while (Private->CrntShiftState < Private->RunningBits) {
    -        /* Needs to get more bytes from input stream for next code: */
    -        if (DGifBufferedInput(GifFile, Private->Buf, &NextByte) ==
    -            GIF_ERROR) {
    -            return GIF_ERROR;
    +        /* The image can't contain more than LZ_BITS per code. */
    +        if (Private->RunningBits > LZ_BITS) {
    +                GifFile->Error = D_GIF_ERR_IMAGE_DEFECT;
    +                return GIF_ERROR;
             }
    -        Private->CrntShiftDWord |= ((unsigned long)NextByte)
    -                                   << Private->CrntShiftState;
    -        Private->CrntShiftState += 8;
    -    }
    -    *Code = Private->CrntShiftDWord & CodeMasks[Private->RunningBits];
     
    -    Private->CrntShiftDWord >>= Private->RunningBits;
    -    Private->CrntShiftState -= Private->RunningBits;
    +        while (Private->CrntShiftState < Private->RunningBits) {
    +                /* Needs to get more bytes from input stream for next code: */
    +                if (DGifBufferedInput(GifFile, Private->Buf, &NextByte) ==
    +                    GIF_ERROR) {
    +                        return GIF_ERROR;
    +                }
    +                Private->CrntShiftDWord |= ((unsigned long)NextByte)
    +                                           << Private->CrntShiftState;
    +                Private->CrntShiftState += 8;
    +        }
    +        *Code = Private->CrntShiftDWord & CodeMasks[Private->RunningBits];
     
    -    /* If code cannot fit into RunningBits bits, must raise its size. Note
    -     * however that codes above 4095 are used for special signaling.
    -     * If we're using LZ_BITS bits already and we're at the max code, just
    -     * keep using the table as it is, don't increment Private->RunningCode.
    -     */
    -    if (Private->RunningCode < LZ_MAX_CODE + 2 &&
    -        ++Private->RunningCode > Private->MaxCode1 &&
    -        Private->RunningBits < LZ_BITS) {
    -        Private->MaxCode1 <<= 1;
    -        Private->RunningBits++;
    -    }
    -    return GIF_OK;
    +        Private->CrntShiftDWord >>= Private->RunningBits;
    +        Private->CrntShiftState -= Private->RunningBits;
    +
    +        /* If code cannot fit into RunningBits bits, must raise its size. Note
    +         * however that codes above 4095 are used for special signaling.
    +         * If we're using LZ_BITS bits already and we're at the max code, just
    +         * keep using the table as it is, don't increment Private->RunningCode.
    +         */
    +        if (Private->RunningCode < LZ_MAX_CODE + 2 &&
    +            ++Private->RunningCode > Private->MaxCode1 &&
    +            Private->RunningBits < LZ_BITS) {
    +                Private->MaxCode1 <<= 1;
    +                Private->RunningBits++;
    +        }
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -1138,34 +1140,34 @@ static int DGifDecompressInput(GifFileType *GifFile, int *Code) {
     ******************************************************************************/
     static int DGifBufferedInput(GifFileType *GifFile, GifByteType *Buf,
                                  GifByteType *NextByte) {
    -    if (Buf[0] == 0) {
    -        /* Needs to read the next buffer - this one is empty: */
    -        /* coverity[check_return] */
    -        if (InternalRead(GifFile, Buf, 1) != 1) {
    -            GifFile->Error = D_GIF_ERR_READ_FAILED;
    -            return GIF_ERROR;
    -        }
    -        /* There shouldn't be any empty data blocks here as the LZW spec
    -         * says the LZW termination code should come first.  Therefore
    -         * we shouldn't be inside this routine at that point.
    -         */
             if (Buf[0] == 0) {
    -            GifFile->Error = D_GIF_ERR_IMAGE_DEFECT;
    -            return GIF_ERROR;
    +                /* Needs to read the next buffer - this one is empty: */
    +                /* coverity[check_return] */
    +                if (InternalRead(GifFile, Buf, 1) != 1) {
    +                        GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                        return GIF_ERROR;
    +                }
    +                /* There shouldn't be any empty data blocks here as the LZW spec
    +                 * says the LZW termination code should come first.  Therefore
    +                 * we shouldn't be inside this routine at that point.
    +                 */
    +                if (Buf[0] == 0) {
    +                        GifFile->Error = D_GIF_ERR_IMAGE_DEFECT;
    +                        return GIF_ERROR;
    +                }
    +                if (InternalRead(GifFile, &Buf[1], Buf[0]) != Buf[0]) {
    +                        GifFile->Error = D_GIF_ERR_READ_FAILED;
    +                        return GIF_ERROR;
    +                }
    +                *NextByte = Buf[1];
    +                Buf[1] = 2; /* We use now the second place as last char read! */
    +                Buf[0]--;
    +        } else {
    +                *NextByte = Buf[Buf[1]++];
    +                Buf[0]--;
             }
    -        if (InternalRead(GifFile, &Buf[1], Buf[0]) != Buf[0]) {
    -            GifFile->Error = D_GIF_ERR_READ_FAILED;
    -            return GIF_ERROR;
    -        }
    -        *NextByte = Buf[1];
    -        Buf[1] = 2; /* We use now the second place as last char read! */
    -        Buf[0]--;
    -    } else {
    -        *NextByte = Buf[Buf[1]++];
    -        Buf[0]--;
    -    }
     
    -    return GIF_OK;
    +        return GIF_OK;
     }
     
     /******************************************************************************
    @@ -1175,17 +1177,20 @@ static int DGifBufferedInput(GifFileType *GifFile, GifByteType *Buf,
      SavedImages may point to the spoilt image and null pointer buffers.
     *******************************************************************************/
     void DGifDecreaseImageCounter(GifFileType *GifFile) {
    -    GifFile->ImageCount--;
    -    if (GifFile->SavedImages[GifFile->ImageCount].RasterBits != NULL) {
    -        free(GifFile->SavedImages[GifFile->ImageCount].RasterBits);
    -    }
    +        GifFile->ImageCount--;
    +        if (GifFile->SavedImages[GifFile->ImageCount].RasterBits != NULL) {
    +                free(GifFile->SavedImages[GifFile->ImageCount].RasterBits);
    +        }
    +        if (GifFile->SavedImages[GifFile->ImageCount].ImageDesc.ColorMap != NULL) {
    +                GifFreeMapObject(GifFile->SavedImages[GifFile->ImageCount].ImageDesc.ColorMap);
    +        }
     
    -    // Realloc array according to the new image counter.
    -    SavedImage *correct_saved_images = (SavedImage *)reallocarray(
    -        GifFile->SavedImages, GifFile->ImageCount, sizeof(SavedImage));
    -    if (correct_saved_images != NULL) {
    -        GifFile->SavedImages = correct_saved_images;
    -    }
    +        // Realloc array according to the new image counter.
    +        SavedImage *correct_saved_images = (SavedImage *)reallocarray(
    +            GifFile->SavedImages, GifFile->ImageCount, sizeof(SavedImage));
    +        if (correct_saved_images != NULL) {
    +                GifFile->SavedImages = correct_saved_images;
    +        }
     }
     
     /******************************************************************************
    @@ -1194,143 +1199,143 @@ void DGifDecreaseImageCounter(GifFileType *GifFile) {
      first to initialize I/O.  Its inverse is EGifSpew().
     *******************************************************************************/
     int DGifSlurp(GifFileType *GifFile) {
    -    size_t ImageSize;
    -    GifRecordType RecordType;
    -    SavedImage *sp;
    -    GifByteType *ExtData;
    -    int ExtFunction;
    +        size_t ImageSize;
    +        GifRecordType RecordType;
    +        SavedImage *sp;
    +        GifByteType *ExtData;
    +        int ExtFunction;
     
    -    GifFile->ExtensionBlocks = NULL;
    -    GifFile->ExtensionBlockCount = 0;
    +        GifFile->ExtensionBlocks = NULL;
    +        GifFile->ExtensionBlockCount = 0;
     
    -    do {
    -        if (DGifGetRecordType(GifFile, &RecordType) == GIF_ERROR) {
    -            return (GIF_ERROR);
    -        }
    +        do {
    +                if (DGifGetRecordType(GifFile, &RecordType) == GIF_ERROR) {
    +                        return (GIF_ERROR);
    +                }
     
    -        switch (RecordType) {
    -        case IMAGE_DESC_RECORD_TYPE:
    -            if (DGifGetImageDesc(GifFile) == GIF_ERROR) {
    -                return (GIF_ERROR);
    -            }
    -
    -            sp = &GifFile->SavedImages[GifFile->ImageCount - 1];
    -            /* Allocate memory for the image */
    -            if (sp->ImageDesc.Width <= 0 ||
    -                sp->ImageDesc.Height <= 0 ||
    -                sp->ImageDesc.Width >
    -                    (INT_MAX / sp->ImageDesc.Height)) {
    -                DGifDecreaseImageCounter(GifFile);
    -                return GIF_ERROR;
    -            }
    -            ImageSize = sp->ImageDesc.Width * sp->ImageDesc.Height;
    -
    -            if (ImageSize > (SIZE_MAX / sizeof(GifPixelType))) {
    -                DGifDecreaseImageCounter(GifFile);
    -                return GIF_ERROR;
    -            }
    -            sp->RasterBits = (unsigned char *)reallocarray(
    -                NULL, ImageSize, sizeof(GifPixelType));
    -
    -            if (sp->RasterBits == NULL) {
    -                DGifDecreaseImageCounter(GifFile);
    -                return GIF_ERROR;
    -            }
    -
    -            if (sp->ImageDesc.Interlace) {
    -                int i, j;
    -                /*
    -                 * The way an interlaced image should be read -
    -                 * offsets and jumps...
    -                 */
    -                static const int InterlacedOffset[] = {0, 4, 2,
    -                                                       1};
    -                static const int InterlacedJumps[] = {8, 8, 4,
    -                                                      2};
    -                /* Need to perform 4 passes on the image */
    -                for (i = 0; i < 4; i++) {
    -                    for (j = InterlacedOffset[i];
    -                         j < sp->ImageDesc.Height;
    -                         j += InterlacedJumps[i]) {
    -                        if (DGifGetLine(
    -                                GifFile,
    -                                sp->RasterBits +
    -                                    j * sp->ImageDesc
    -                                            .Width,
    -                                sp->ImageDesc.Width) ==
    -                            GIF_ERROR) {
    -                            DGifDecreaseImageCounter(
    -                                GifFile);
    -                            return GIF_ERROR;
    +                switch (RecordType) {
    +                case IMAGE_DESC_RECORD_TYPE:
    +                        if (DGifGetImageDesc(GifFile) == GIF_ERROR) {
    +                                return (GIF_ERROR);
                             }
    -                    }
    +
    +                        sp = &GifFile->SavedImages[GifFile->ImageCount - 1];
    +                        /* Allocate memory for the image */
    +                        if (sp->ImageDesc.Width <= 0 ||
    +                            sp->ImageDesc.Height <= 0 ||
    +                            sp->ImageDesc.Width >
    +                                (INT_MAX / sp->ImageDesc.Height)) {
    +                                DGifDecreaseImageCounter(GifFile);
    +                                return GIF_ERROR;
    +                        }
    +                        ImageSize = sp->ImageDesc.Width * sp->ImageDesc.Height;
    +
    +                        if (ImageSize > (SIZE_MAX / sizeof(GifPixelType))) {
    +                                DGifDecreaseImageCounter(GifFile);
    +                                return GIF_ERROR;
    +                        }
    +                        sp->RasterBits = (unsigned char *)reallocarray(
    +                            NULL, ImageSize, sizeof(GifPixelType));
    +
    +                        if (sp->RasterBits == NULL) {
    +                                DGifDecreaseImageCounter(GifFile);
    +                                return GIF_ERROR;
    +                        }
    +
    +                        if (sp->ImageDesc.Interlace) {
    +                                int i, j;
    +                                /*
    +                                 * The way an interlaced image should be read -
    +                                 * offsets and jumps...
    +                                 */
    +                                static const int InterlacedOffset[] = {0, 4, 2,
    +                                                                       1};
    +                                static const int InterlacedJumps[] = {8, 8, 4,
    +                                                                      2};
    +                                /* Need to perform 4 passes on the image */
    +                                for (i = 0; i < 4; i++) {
    +                                        for (j = InterlacedOffset[i];
    +                                             j < sp->ImageDesc.Height;
    +                                             j += InterlacedJumps[i]) {
    +                                                if (DGifGetLine(
    +                                                        GifFile,
    +                                                        sp->RasterBits +
    +                                                            j * sp->ImageDesc
    +                                                                    .Width,
    +                                                        sp->ImageDesc.Width) ==
    +                                                    GIF_ERROR) {
    +                                                        DGifDecreaseImageCounter(
    +                                                            GifFile);
    +                                                        return GIF_ERROR;
    +                                                }
    +                                        }
    +                                }
    +                        } else {
    +                                if (DGifGetLine(GifFile, sp->RasterBits,
    +                                                ImageSize) == GIF_ERROR) {
    +                                        DGifDecreaseImageCounter(GifFile);
    +                                        return GIF_ERROR;
    +                                }
    +                        }
    +
    +                        if (GifFile->ExtensionBlocks) {
    +                                sp->ExtensionBlocks = GifFile->ExtensionBlocks;
    +                                sp->ExtensionBlockCount =
    +                                    GifFile->ExtensionBlockCount;
    +
    +                                GifFile->ExtensionBlocks = NULL;
    +                                GifFile->ExtensionBlockCount = 0;
    +                        }
    +                        break;
    +
    +                case EXTENSION_RECORD_TYPE:
    +                        if (DGifGetExtension(GifFile, &ExtFunction, &ExtData) ==
    +                            GIF_ERROR) {
    +                                return (GIF_ERROR);
    +                        }
    +                        /* Create an extension block with our data */
    +                        if (ExtData != NULL) {
    +                                if (GifAddExtensionBlock(
    +                                        &GifFile->ExtensionBlockCount,
    +                                        &GifFile->ExtensionBlocks, ExtFunction,
    +                                        ExtData[0], &ExtData[1]) == GIF_ERROR) {
    +                                        return (GIF_ERROR);
    +                                }
    +                        }
    +                        for (;;) {
    +                                if (DGifGetExtensionNext(GifFile, &ExtData) ==
    +                                    GIF_ERROR) {
    +                                        return (GIF_ERROR);
    +                                }
    +                                if (ExtData == NULL) {
    +                                        break;
    +                                }
    +                                /* Continue the extension block */
    +                                if (GifAddExtensionBlock(
    +                                        &GifFile->ExtensionBlockCount,
    +                                        &GifFile->ExtensionBlocks,
    +                                        CONTINUE_EXT_FUNC_CODE, ExtData[0],
    +                                        &ExtData[1]) == GIF_ERROR) {
    +                                        return (GIF_ERROR);
    +                                }
    +                        }
    +                        break;
    +
    +                case TERMINATE_RECORD_TYPE:
    +                        break;
    +
    +                default: /* Should be trapped by DGifGetRecordType */
    +                        break;
                     }
    -            } else {
    -                if (DGifGetLine(GifFile, sp->RasterBits,
    -                                ImageSize) == GIF_ERROR) {
    -                    DGifDecreaseImageCounter(GifFile);
    -                    return GIF_ERROR;
    -                }
    -            }
    +        } while (RecordType != TERMINATE_RECORD_TYPE);
     
    -            if (GifFile->ExtensionBlocks) {
    -                sp->ExtensionBlocks = GifFile->ExtensionBlocks;
    -                sp->ExtensionBlockCount =
    -                    GifFile->ExtensionBlockCount;
    -
    -                GifFile->ExtensionBlocks = NULL;
    -                GifFile->ExtensionBlockCount = 0;
    -            }
    -            break;
    -
    -        case EXTENSION_RECORD_TYPE:
    -            if (DGifGetExtension(GifFile, &ExtFunction, &ExtData) ==
    -                GIF_ERROR) {
    +        /* Sanity check for corrupted file */
    +        if (GifFile->ImageCount == 0) {
    +                GifFile->Error = D_GIF_ERR_NO_IMAG_DSCR;
                     return (GIF_ERROR);
    -            }
    -            /* Create an extension block with our data */
    -            if (ExtData != NULL) {
    -                if (GifAddExtensionBlock(
    -                        &GifFile->ExtensionBlockCount,
    -                        &GifFile->ExtensionBlocks, ExtFunction,
    -                        ExtData[0], &ExtData[1]) == GIF_ERROR) {
    -                    return (GIF_ERROR);
    -                }
    -            }
    -            for (;;) {
    -                if (DGifGetExtensionNext(GifFile, &ExtData) ==
    -                    GIF_ERROR) {
    -                    return (GIF_ERROR);
    -                }
    -                if (ExtData == NULL) {
    -                    break;
    -                }
    -                /* Continue the extension block */
    -                if (GifAddExtensionBlock(
    -                        &GifFile->ExtensionBlockCount,
    -                        &GifFile->ExtensionBlocks,
    -                        CONTINUE_EXT_FUNC_CODE, ExtData[0],
    -                        &ExtData[1]) == GIF_ERROR) {
    -                    return (GIF_ERROR);
    -                }
    -            }
    -            break;
    -
    -        case TERMINATE_RECORD_TYPE:
    -            break;
    -
    -        default: /* Should be trapped by DGifGetRecordType */
    -            break;
             }
    -    } while (RecordType != TERMINATE_RECORD_TYPE);
     
    -    /* Sanity check for corrupted file */
    -    if (GifFile->ImageCount == 0) {
    -        GifFile->Error = D_GIF_ERR_NO_IMAG_DSCR;
    -        return (GIF_ERROR);
    -    }
    -
    -    return (GIF_OK);
    +        return (GIF_OK);
     }
     
     /* end */
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/gif_err.c b/src/java.desktop/share/native/libsplashscreen/giflib/gif_err.c
    index 3b6785f7c63..a3cc03b8865 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/gif_err.c
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/gif_err.c
    @@ -26,9 +26,9 @@
     
     gif_err.c - handle error reporting for the GIF library.
     
    -SPDX-License-Identifier: MIT
    -
     ****************************************************************************/
    +// SPDX-License-Identifier: MIT
    +// SPDX-File-Copyright-Txt: (C) Copyright 1989 Gershon Elber
     
      #include "gif_lib.h"
     
    @@ -39,83 +39,83 @@ SPDX-License-Identifier: MIT
      Return a string description of  the last GIF error
     *****************************************************************************/
     const char *GifErrorString(int ErrorCode) {
    -    const char *Err;
    +        const char *Err;
     
    -    switch (ErrorCode) {
    -    case E_GIF_ERR_OPEN_FAILED:
    -        Err = "Failed to open given file";
    -        break;
    -    case E_GIF_ERR_WRITE_FAILED:
    -        Err = "Failed to write to given file";
    -        break;
    -    case E_GIF_ERR_HAS_SCRN_DSCR:
    -        Err = "Screen descriptor has already been set";
    -        break;
    -    case E_GIF_ERR_HAS_IMAG_DSCR:
    -        Err = "Image descriptor is still active";
    -        break;
    -    case E_GIF_ERR_NO_COLOR_MAP:
    -        Err = "Neither global nor local color map";
    -        break;
    -    case E_GIF_ERR_DATA_TOO_BIG:
    -        Err = "Number of pixels bigger than width * height";
    -        break;
    -    case E_GIF_ERR_NOT_ENOUGH_MEM:
    -        Err = "Failed to allocate required memory";
    -        break;
    -    case E_GIF_ERR_DISK_IS_FULL:
    -        Err = "Write failed (disk full?)";
    -        break;
    -    case E_GIF_ERR_CLOSE_FAILED:
    -        Err = "Failed to close given file";
    -        break;
    -    case E_GIF_ERR_NOT_WRITEABLE:
    -        Err = "Given file was not opened for write";
    -        break;
    -    case D_GIF_ERR_OPEN_FAILED:
    -        Err = "Failed to open given file";
    -        break;
    -    case D_GIF_ERR_READ_FAILED:
    -        Err = "Failed to read from given file";
    -        break;
    -    case D_GIF_ERR_NOT_GIF_FILE:
    -        Err = "Data is not in GIF format";
    -        break;
    -    case D_GIF_ERR_NO_SCRN_DSCR:
    -        Err = "No screen descriptor detected";
    -        break;
    -    case D_GIF_ERR_NO_IMAG_DSCR:
    -        Err = "No Image Descriptor detected";
    -        break;
    -    case D_GIF_ERR_NO_COLOR_MAP:
    -        Err = "Neither global nor local color map";
    -        break;
    -    case D_GIF_ERR_WRONG_RECORD:
    -        Err = "Wrong record type detected";
    -        break;
    -    case D_GIF_ERR_DATA_TOO_BIG:
    -        Err = "Number of pixels bigger than width * height";
    -        break;
    -    case D_GIF_ERR_NOT_ENOUGH_MEM:
    -        Err = "Failed to allocate required memory";
    -        break;
    -    case D_GIF_ERR_CLOSE_FAILED:
    -        Err = "Failed to close given file";
    -        break;
    -    case D_GIF_ERR_NOT_READABLE:
    -        Err = "Given file was not opened for read";
    -        break;
    -    case D_GIF_ERR_IMAGE_DEFECT:
    -        Err = "Image is defective, decoding aborted";
    -        break;
    -    case D_GIF_ERR_EOF_TOO_SOON:
    -        Err = "Image EOF detected before image complete";
    -        break;
    -    default:
    -        Err = NULL;
    -        break;
    -    }
    -    return Err;
    +        switch (ErrorCode) {
    +        case E_GIF_ERR_OPEN_FAILED:
    +                Err = "Failed to open given file";
    +                break;
    +        case E_GIF_ERR_WRITE_FAILED:
    +                Err = "Failed to write to given file";
    +                break;
    +        case E_GIF_ERR_HAS_SCRN_DSCR:
    +                Err = "Screen descriptor has already been set";
    +                break;
    +        case E_GIF_ERR_HAS_IMAG_DSCR:
    +                Err = "Image descriptor is still active";
    +                break;
    +        case E_GIF_ERR_NO_COLOR_MAP:
    +                Err = "Neither global nor local color map";
    +                break;
    +        case E_GIF_ERR_DATA_TOO_BIG:
    +                Err = "Number of pixels bigger than width * height";
    +                break;
    +        case E_GIF_ERR_NOT_ENOUGH_MEM:
    +                Err = "Failed to allocate required memory";
    +                break;
    +        case E_GIF_ERR_DISK_IS_FULL:
    +                Err = "Write failed (disk full?)";
    +                break;
    +        case E_GIF_ERR_CLOSE_FAILED:
    +                Err = "Failed to close given file";
    +                break;
    +        case E_GIF_ERR_NOT_WRITEABLE:
    +                Err = "Given file was not opened for write";
    +                break;
    +        case D_GIF_ERR_OPEN_FAILED:
    +                Err = "Failed to open given file";
    +                break;
    +        case D_GIF_ERR_READ_FAILED:
    +                Err = "Failed to read from given file";
    +                break;
    +        case D_GIF_ERR_NOT_GIF_FILE:
    +                Err = "Data is not in GIF format";
    +                break;
    +        case D_GIF_ERR_NO_SCRN_DSCR:
    +                Err = "No screen descriptor detected";
    +                break;
    +        case D_GIF_ERR_NO_IMAG_DSCR:
    +                Err = "No Image Descriptor detected";
    +                break;
    +        case D_GIF_ERR_NO_COLOR_MAP:
    +                Err = "Neither global nor local color map";
    +                break;
    +        case D_GIF_ERR_WRONG_RECORD:
    +                Err = "Wrong record type detected";
    +                break;
    +        case D_GIF_ERR_DATA_TOO_BIG:
    +                Err = "Number of pixels bigger than width * height";
    +                break;
    +        case D_GIF_ERR_NOT_ENOUGH_MEM:
    +                Err = "Failed to allocate required memory";
    +                break;
    +        case D_GIF_ERR_CLOSE_FAILED:
    +                Err = "Failed to close given file";
    +                break;
    +        case D_GIF_ERR_NOT_READABLE:
    +                Err = "Given file was not opened for read";
    +                break;
    +        case D_GIF_ERR_IMAGE_DEFECT:
    +                Err = "Image is defective, decoding aborted";
    +                break;
    +        case D_GIF_ERR_EOF_TOO_SOON:
    +                Err = "Image EOF detected before image complete";
    +                break;
    +        default:
    +                Err = NULL;
    +                break;
    +        }
    +        return Err;
     }
     
     /* end */
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/gif_hash.h b/src/java.desktop/share/native/libsplashscreen/giflib/gif_hash.h
    index bd00af64161..eb3cba3135f 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/gif_hash.h
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/gif_hash.h
    @@ -26,9 +26,8 @@
     
     gif_hash.h - magfic constants and declarations for GIF LZW
     
    -SPDX-License-Identifier: MIT
    -
     ******************************************************************************/
    +// SPDX-License-Identifier: MIT
     
     #ifndef _GIF_HASH_H_
     #define _GIF_HASH_H_
    @@ -46,7 +45,7 @@ SPDX-License-Identifier: MIT
     
     /* The 32 bits of the long are divided into two parts for the key & code:   */
     /* 1. The code is 12 bits as our compression algorithm is limited to 12bits */
    -/* 2. The key is 12 bits Prefix code + 8 bit new char or 20 bits.        */
    +/* 2. The key is 12 bits Prefix code + 8 bit new char or 20 bits.           */
     /* The key is the upper 20 bits.  The code is the lower 12. */
     #define HT_GET_KEY(l) (l >> 12)
     #define HT_GET_CODE(l) (l & 0x0FFF)
    @@ -54,7 +53,7 @@ SPDX-License-Identifier: MIT
     #define HT_PUT_CODE(l) (l & 0x0FFF)
     
     typedef struct GifHashTableType {
    -    uint32_t HTable[HT_SIZE];
    +        uint32_t HTable[HT_SIZE];
     } GifHashTableType;
     
     GifHashTableType *_InitHashTable(void);
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib.h b/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib.h
    index 74a2e969c0d..64b33beefa7 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib.h
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib.h
    @@ -37,8 +37,8 @@ SPDX-License-Identifier: MIT
     extern "C" {
     #endif /* __cplusplus */
     
    -#define GIFLIB_MAJOR 5
    -#define GIFLIB_MINOR 2
     +#define GIFLIB_MAJOR 5
     +#define GIFLIB_MINOR 2
     #define GIFLIB_RELEASE 2
     
     #define GIF_ERROR 0
    @@ -60,26 +60,26 @@ typedef unsigned int GifPrefixType;
     typedef int GifWord;
     
     typedef struct GifColorType {
    -    GifByteType Red, Green, Blue;
    +        GifByteType Red, Green, Blue;
     } GifColorType;
     
     typedef struct ColorMapObject {
    -    int ColorCount;
    -    int BitsPerPixel;
    -    bool SortFlag;
    -    GifColorType *Colors; /* on malloc(3) heap */
    +        int ColorCount;
    +        int BitsPerPixel;
    +        bool SortFlag;
    +        GifColorType *Colors; /* on malloc(3) heap */
     } ColorMapObject;
     
     typedef struct GifImageDesc {
    -    GifWord Left, Top, Width, Height; /* Current image dimensions. */
    -    bool Interlace;                   /* Sequential/Interlaced lines. */
    -    ColorMapObject *ColorMap;         /* The local color map */
    +        GifWord Left, Top, Width, Height; /* Current image dimensions. */
    +        bool Interlace;                   /* Sequential/Interlaced lines. */
    +        ColorMapObject *ColorMap;         /* The local color map */
     } GifImageDesc;
     
     typedef struct ExtensionBlock {
    -    int ByteCount;
    -    GifByteType *Bytes;            /* on malloc(3) heap */
    -    int Function;                  /* The block function code */
    +        int ByteCount;
    +        GifByteType *Bytes;            /* on malloc(3) heap */
    +        int Function;                  /* The block function code */
     #define CONTINUE_EXT_FUNC_CODE 0x00    /* continuation subblock */
     #define COMMENT_EXT_FUNC_CODE 0xfe     /* comment */
     #define GRAPHICS_EXT_FUNC_CODE 0xf9    /* graphics control (GIF89) */
    @@ -88,36 +88,36 @@ typedef struct ExtensionBlock {
     } ExtensionBlock;
     
     typedef struct SavedImage {
    -    GifImageDesc ImageDesc;
    -    GifByteType *RasterBits;         /* on malloc(3) heap */
    -    int ExtensionBlockCount;         /* Count of extensions before image */
    -    ExtensionBlock *ExtensionBlocks; /* Extensions before image */
    +        GifImageDesc ImageDesc;
    +        GifByteType *RasterBits;         /* on malloc(3) heap */
    +        int ExtensionBlockCount;         /* Count of extensions before image */
    +        ExtensionBlock *ExtensionBlocks; /* Extensions before image */
     } SavedImage;
     
     typedef struct GifFileType {
    -    GifWord SWidth, SHeight;   /* Size of virtual canvas */
    -    GifWord SColorResolution;  /* How many colors can we generate? */
    -    GifWord SBackGroundColor;  /* Background color for virtual canvas */
    -    GifByteType AspectByte;    /* Used to compute pixel aspect ratio */
    -    ColorMapObject *SColorMap; /* Global colormap, NULL if nonexistent. */
    -    int ImageCount;            /* Number of current image (both APIs) */
    -    GifImageDesc Image;        /* Current image (low-level API) */
    -    SavedImage *SavedImages;   /* Image sequence (high-level API) */
    -    int ExtensionBlockCount;   /* Count extensions past last image */
    -    ExtensionBlock *ExtensionBlocks; /* Extensions past last image */
    -    int Error;                       /* Last error condition reported */
    -    void *UserData;                  /* hook to attach user data (TVT) */
    -    void *Private;                   /* Don't mess with this! */
    +        GifWord SWidth, SHeight;   /* Size of virtual canvas */
    +        GifWord SColorResolution;  /* How many colors can we generate? */
    +        GifWord SBackGroundColor;  /* Background color for virtual canvas */
    +        GifByteType AspectByte;    /* Used to compute pixel aspect ratio */
    +        ColorMapObject *SColorMap; /* Global colormap, NULL if nonexistent. */
    +        int ImageCount;            /* Number of current image (both APIs) */
    +        GifImageDesc Image;        /* Current image (low-level API) */
    +        SavedImage *SavedImages;   /* Image sequence (high-level API) */
    +        int ExtensionBlockCount;   /* Count extensions past last image */
    +        ExtensionBlock *ExtensionBlocks; /* Extensions past last image */
    +        int Error;                       /* Last error condition reported */
    +        void *UserData;                  /* hook to attach user data (TVT) */
    +        void *Private;                   /* Don't mess with this! */
     } GifFileType;
     
     #define GIF_ASPECT_RATIO(n) ((n) + 15.0 / 64.0)
     
     typedef enum {
    -    UNDEFINED_RECORD_TYPE,
    -    SCREEN_DESC_RECORD_TYPE,
    -    IMAGE_DESC_RECORD_TYPE, /* Begin with ',' */
    -    EXTENSION_RECORD_TYPE,  /* Begin with '!' */
    -    TERMINATE_RECORD_TYPE   /* Begin with ';' */
    +        UNDEFINED_RECORD_TYPE,
    +        SCREEN_DESC_RECORD_TYPE,
    +        IMAGE_DESC_RECORD_TYPE, /* Begin with ',' */
    +        EXTENSION_RECORD_TYPE,  /* Begin with '!' */
    +        TERMINATE_RECORD_TYPE   /* Begin with ';' */
     } GifRecordType;
     
     /* func type to read gif data from arbitrary sources (TVT) */
    @@ -133,14 +133,14 @@ typedef int (*OutputFunc)(GifFileType *, const GifByteType *, int);
     ******************************************************************************/
     
     typedef struct GraphicsControlBlock {
    -    int DisposalMode;
    +        int DisposalMode;
     #define DISPOSAL_UNSPECIFIED 0 /* No disposal specified. */
     #define DISPOSE_DO_NOT 1       /* Leave image in place */
     #define DISPOSE_BACKGROUND 2   /* Set area too background color */
     #define DISPOSE_PREVIOUS 3     /* Restore to previous content */
    -    bool UserInputFlag;    /* User confirmation required before disposal */
    -    int DelayTime;         /* pre-display delay in 0.01sec units */
    -    int TransparentColor;  /* Palette index for transparency, -1 if none */
    +        bool UserInputFlag;    /* User confirmation required before disposal */
    +        int DelayTime;         /* pre-display delay in 0.01sec units */
    +        int TransparentColor;  /* Palette index for transparency, -1 if none */
     #define NO_TRANSPARENT_COLOR -1
     } GraphicsControlBlock;
     
    @@ -153,21 +153,21 @@ GifFileType *EGifOpenFileName(const char *GifFileName,
                                   const bool GifTestExistence, int *Error);
     GifFileType *EGifOpenFileHandle(const int GifFileHandle, int *Error);
     GifFileType *EGifOpen(void *userPtr, OutputFunc writeFunc, int *Error);
    -int EGifSpew(GifFileType *GifFile);
     +int EGifSpew(GifFileType *GifFile);
     const char *EGifGetGifVersion(GifFileType *GifFile); /* new in 5.x */
     int EGifCloseFile(GifFileType *GifFile, int *ErrorCode);
     
     #define E_GIF_SUCCEEDED 0
    -#define E_GIF_ERR_OPEN_FAILED 1 /* And EGif possible errors. */
    -#define E_GIF_ERR_WRITE_FAILED 2
    -#define E_GIF_ERR_HAS_SCRN_DSCR 3
    -#define E_GIF_ERR_HAS_IMAG_DSCR 4
    -#define E_GIF_ERR_NO_COLOR_MAP 5
    -#define E_GIF_ERR_DATA_TOO_BIG 6
    -#define E_GIF_ERR_NOT_ENOUGH_MEM 7
    -#define E_GIF_ERR_DISK_IS_FULL 8
    -#define E_GIF_ERR_CLOSE_FAILED 9
    -#define E_GIF_ERR_NOT_WRITEABLE 10
     +#define E_GIF_ERR_OPEN_FAILED 1 /* And EGif possible errors. */
     +#define E_GIF_ERR_WRITE_FAILED 2
     +#define E_GIF_ERR_HAS_SCRN_DSCR 3
     +#define E_GIF_ERR_HAS_IMAG_DSCR 4
     +#define E_GIF_ERR_NO_COLOR_MAP 5
     +#define E_GIF_ERR_DATA_TOO_BIG 6
     +#define E_GIF_ERR_NOT_ENOUGH_MEM 7
     +#define E_GIF_ERR_DISK_IS_FULL 8
     +#define E_GIF_ERR_CLOSE_FAILED 9
     +#define E_GIF_ERR_NOT_WRITEABLE 10
     
     /* These are legacy.  You probably do not want to call them directly */
     int EGifPutScreenDesc(GifFileType *GifFile, const int GifWidth,
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib_private.h b/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib_private.h
    index f905e0d7b48..079d05898b4 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib_private.h
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/gif_lib_private.h
    @@ -60,30 +60,30 @@ SPDX-License-Identifier: MIT
     #define IS_WRITEABLE(Private) (Private->FileState & FILE_STATE_WRITE)
     
     typedef struct GifFilePrivateType {
    -    GifWord FileState, FileHandle, /* Where all this data goes to! */
    -        BitsPerPixel, /* Bits per pixel (Codes uses at least this + 1). */
    -        ClearCode,    /* The CLEAR LZ code. */
    -        EOFCode,      /* The EOF LZ code. */
    -        RunningCode,  /* The next code algorithm can generate. */
    -        RunningBits,  /* The number of bits required to represent
    -                         RunningCode. */
    -        MaxCode1, /* 1 bigger than max. possible code, in RunningBits bits.
    -                   */
    -        LastCode, /* The code before the current code. */
    -        CrntCode, /* Current algorithm code. */
    -        StackPtr, /* For character stack (see below). */
    -        CrntShiftState;           /* Number of bits in CrntShiftDWord. */
    -    unsigned long CrntShiftDWord; /* For bytes decomposition into codes. */
    -    unsigned long PixelCount;     /* Number of pixels in image. */
    -    FILE *File;                   /* File as stream. */
    -    InputFunc Read;               /* function to read gif input (TVT) */
    -    OutputFunc Write;             /* function to write gif output (MRB) */
    -    GifByteType Buf[256];         /* Compressed input is buffered here. */
    -    GifByteType Stack[LZ_MAX_CODE]; /* Decoded pixels are stacked here. */
    -    GifByteType Suffix[LZ_MAX_CODE + 1]; /* So we can trace the codes. */
    -    GifPrefixType Prefix[LZ_MAX_CODE + 1];
    -    GifHashTableType *HashTable;
    -    bool gif89;
    +        GifWord FileState, FileHandle, /* Where all this data goes to! */
    +            BitsPerPixel, /* Bits per pixel (Codes uses at least this + 1). */
    +            ClearCode,    /* The CLEAR LZ code. */
    +            EOFCode,      /* The EOF LZ code. */
    +            RunningCode,  /* The next code algorithm can generate. */
    +            RunningBits,  /* The number of bits required to represent
    +                             RunningCode. */
    +            MaxCode1, /* 1 bigger than max. possible code, in RunningBits bits.
    +                       */
    +            LastCode, /* The code before the current code. */
    +            CrntCode, /* Current algorithm code. */
    +            StackPtr, /* For character stack (see below). */
    +            CrntShiftState;           /* Number of bits in CrntShiftDWord. */
    +        unsigned long CrntShiftDWord; /* For bytes decomposition into codes. */
    +        unsigned long PixelCount;     /* Number of pixels in image. */
    +        FILE *File;                   /* File as stream. */
    +        InputFunc Read;               /* function to read gif input (TVT) */
    +        OutputFunc Write;             /* function to write gif output (MRB) */
    +        GifByteType Buf[256];         /* Compressed input is buffered here. */
    +        GifByteType Stack[LZ_MAX_CODE]; /* Decoded pixels are stacked here. */
    +        GifByteType Suffix[LZ_MAX_CODE + 1]; /* So we can trace the codes. */
    +        GifPrefixType Prefix[LZ_MAX_CODE + 1];
    +        GifHashTableType *HashTable;
    +        bool gif89;
     } GifFilePrivateType;
     
     #ifndef HAVE_REALLOCARRAY
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/gifalloc.c b/src/java.desktop/share/native/libsplashscreen/giflib/gifalloc.c
    index 5aef3044558..25e03914496 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/gifalloc.c
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/gifalloc.c
    @@ -26,9 +26,9 @@
     
      GIF construction tools
     
    -SPDX-License-Identifier: MIT
    -
     ****************************************************************************/
    +// SPDX-License-Identifier: MIT
     +// SPDX-FileCopyrightText: Copyright (C) Eric S. Raymond <esr@thyrsus.com>
     
      #include <stdio.h>
      #include <stdlib.h>
    @@ -45,14 +45,14 @@ SPDX-License-Identifier: MIT
     
     /* return smallest bitfield size n will fit in */
     int GifBitSize(int n) {
    -    register int i;
    +        register int i;
     
    -    for (i = 1; i <= 8; i++) {
    -        if ((1 << i) >= n) {
    -            break;
    +        for (i = 1; i <= 8; i++) {
    +                if ((1 << i) >= n) {
    +                        break;
    +                }
             }
    -    }
    -    return (i);
    +        return (i);
     }
     
     /******************************************************************************
    @@ -64,64 +64,64 @@ int GifBitSize(int n) {
      * ColorMap if that pointer is non-NULL.
      */
     ColorMapObject *GifMakeMapObject(int ColorCount, const GifColorType *ColorMap) {
    -    ColorMapObject *Object;
    +        ColorMapObject *Object;
     
    -    /*** FIXME: Our ColorCount has to be a power of two.  Is it necessary to
    -     * make the user know that or should we automatically round up instead?
    -     */
    -    if (ColorCount != (1 << GifBitSize(ColorCount))) {
    -        return ((ColorMapObject *)NULL);
    -    }
    +        /*** FIXME: Our ColorCount has to be a power of two.  Is it necessary to
    +         * make the user know that or should we automatically round up instead?
    +         */
    +        if (ColorCount != (1 << GifBitSize(ColorCount))) {
    +                return ((ColorMapObject *)NULL);
    +        }
     
    -    Object = (ColorMapObject *)malloc(sizeof(ColorMapObject));
    -    if (Object == (ColorMapObject *)NULL) {
    -        return ((ColorMapObject *)NULL);
    -    }
    +        Object = (ColorMapObject *)malloc(sizeof(ColorMapObject));
    +        if (Object == (ColorMapObject *)NULL) {
    +                return ((ColorMapObject *)NULL);
    +        }
     
    -    Object->Colors =
    -        (GifColorType *)calloc(ColorCount, sizeof(GifColorType));
    -    if (Object->Colors == (GifColorType *)NULL) {
    -        free(Object);
    -        return ((ColorMapObject *)NULL);
    -    }
    +        Object->Colors =
    +            (GifColorType *)calloc(ColorCount, sizeof(GifColorType));
    +        if (Object->Colors == (GifColorType *)NULL) {
    +                free(Object);
    +                return ((ColorMapObject *)NULL);
    +        }
     
    -    Object->ColorCount = ColorCount;
    -    Object->BitsPerPixel = GifBitSize(ColorCount);
    -    Object->SortFlag = false;
    +        Object->ColorCount = ColorCount;
    +        Object->BitsPerPixel = GifBitSize(ColorCount);
    +        Object->SortFlag = false;
     
    -    if (ColorMap != NULL) {
    -        memcpy((char *)Object->Colors, (char *)ColorMap,
    -               ColorCount * sizeof(GifColorType));
    -    }
    +        if (ColorMap != NULL) {
    +                memcpy((char *)Object->Colors, (char *)ColorMap,
    +                       ColorCount * sizeof(GifColorType));
    +        }
     
    -    return (Object);
    +        return (Object);
     }
     
     /*******************************************************************************
      Free a color map object
     *******************************************************************************/
     void GifFreeMapObject(ColorMapObject *Object) {
    -    if (Object != NULL) {
    -        (void)free(Object->Colors);
    -        (void)free(Object);
    -    }
    +        if (Object != NULL) {
    +                (void)free(Object->Colors);
    +                (void)free(Object);
    +        }
     }
     
     #ifdef DEBUG
     void DumpColorMap(ColorMapObject *Object, FILE *fp) {
    -    if (Object != NULL) {
    -        int i, j, Len = Object->ColorCount;
    +        if (Object != NULL) {
    +                int i, j, Len = Object->ColorCount;
     
    -        for (i = 0; i < Len; i += 4) {
    -            for (j = 0; j < 4 && j < Len; j++) {
    -                (void)fprintf(fp, "%3d: %02x %02x %02x   ",
    -                              i + j, Object->Colors[i + j].Red,
    -                              Object->Colors[i + j].Green,
    -                              Object->Colors[i + j].Blue);
    -            }
    -            (void)fprintf(fp, "\n");
    +                for (i = 0; i < Len; i += 4) {
    +                        for (j = 0; j < 4 && j < Len; j++) {
    +                                (void)fprintf(fp, "%3d: %02x %02x %02x   ",
    +                                              i + j, Object->Colors[i + j].Red,
    +                                              Object->Colors[i + j].Green,
    +                                              Object->Colors[i + j].Blue);
    +                        }
    +                        (void)fprintf(fp, "\n");
    +                }
             }
    -    }
     }
     #endif /* DEBUG */
     
    @@ -135,112 +135,112 @@ void DumpColorMap(ColorMapObject *Object, FILE *fp) {
     ColorMapObject *GifUnionColorMap(const ColorMapObject *ColorIn1,
                                      const ColorMapObject *ColorIn2,
                                      GifPixelType ColorTransIn2[]) {
    -    int i, j, CrntSlot, RoundUpTo, NewGifBitSize;
    -    ColorMapObject *ColorUnion;
    -
    -    /*
    -     * We don't worry about duplicates within either color map; if
    -     * the caller wants to resolve those, he can perform unions
    -     * with an empty color map.
    -     */
    -
    -    /* Allocate table which will hold the result for sure. */
    -    ColorUnion = GifMakeMapObject(
    -        MAX(ColorIn1->ColorCount, ColorIn2->ColorCount) * 2, NULL);
    -
    -    if (ColorUnion == NULL) {
    -        return (NULL);
    -    }
    -
    -    /*
    -     * Copy ColorIn1 to ColorUnion.
    -     */
    -    for (i = 0; i < ColorIn1->ColorCount; i++) {
    -        ColorUnion->Colors[i] = ColorIn1->Colors[i];
    -    }
    -    CrntSlot = ColorIn1->ColorCount;
    -
    -    /*
    -     * Potentially obnoxious hack:
    -     *
    -     * Back CrntSlot down past all contiguous {0, 0, 0} slots at the end
    -     * of table 1.  This is very useful if your display is limited to
    -     * 16 colors.
    -     */
    -    while (ColorIn1->Colors[CrntSlot - 1].Red == 0 &&
    -           ColorIn1->Colors[CrntSlot - 1].Green == 0 &&
    -           ColorIn1->Colors[CrntSlot - 1].Blue == 0) {
    -        CrntSlot--;
    -    }
    -
    -    /* Copy ColorIn2 to ColorUnion (use old colors if they exist): */
    -    for (i = 0; i < ColorIn2->ColorCount && CrntSlot <= 256; i++) {
    -        /* Let's see if this color already exists: */
    -        for (j = 0; j < ColorIn1->ColorCount; j++) {
    -            if (memcmp(&ColorIn1->Colors[j], &ColorIn2->Colors[i],
    -                       sizeof(GifColorType)) == 0) {
    -                break;
    -            }
    -        }
    -
    -        if (j < ColorIn1->ColorCount) {
    -            ColorTransIn2[i] = j; /* color exists in Color1 */
    -        } else {
    -            /* Color is new - copy it to a new slot: */
    -            ColorUnion->Colors[CrntSlot] = ColorIn2->Colors[i];
    -            ColorTransIn2[i] = CrntSlot++;
    -        }
    -    }
    -
    -    if (CrntSlot > 256) {
    -        GifFreeMapObject(ColorUnion);
    -        return ((ColorMapObject *)NULL);
    -    }
    -
    -    NewGifBitSize = GifBitSize(CrntSlot);
    -    RoundUpTo = (1 << NewGifBitSize);
    -
    -    if (RoundUpTo != ColorUnion->ColorCount) {
    -        register GifColorType *Map = ColorUnion->Colors;
    +        int i, j, CrntSlot, RoundUpTo, NewGifBitSize;
    +        ColorMapObject *ColorUnion;
     
             /*
    -         * Zero out slots up to next power of 2.
    -         * We know these slots exist because of the way ColorUnion's
    -         * start dimension was computed.
    +         * We don't worry about duplicates within either color map; if
    +         * the caller wants to resolve those, he can perform unions
    +         * with an empty color map.
              */
    -        for (j = CrntSlot; j < RoundUpTo; j++) {
    -            Map[j].Red = Map[j].Green = Map[j].Blue = 0;
    +
    +        /* Allocate table which will hold the result for sure. */
    +        ColorUnion = GifMakeMapObject(
    +            MAX(ColorIn1->ColorCount, ColorIn2->ColorCount) * 2, NULL);
    +
    +        if (ColorUnion == NULL) {
    +                return (NULL);
             }
     
    -        /* perhaps we can shrink the map? */
    -        if (RoundUpTo < ColorUnion->ColorCount) {
    -            GifColorType *new_map = (GifColorType *)reallocarray(
    -                Map, RoundUpTo, sizeof(GifColorType));
    -            if (new_map == NULL) {
    +        /*
    +         * Copy ColorIn1 to ColorUnion.
    +         */
    +        for (i = 0; i < ColorIn1->ColorCount; i++) {
    +                ColorUnion->Colors[i] = ColorIn1->Colors[i];
    +        }
    +        CrntSlot = ColorIn1->ColorCount;
    +
    +        /*
    +         * Potentially obnoxious hack:
    +         *
    +         * Back CrntSlot down past all contiguous {0, 0, 0} slots at the end
    +         * of table 1.  This is very useful if your display is limited to
    +         * 16 colors.
    +         */
    +        while (ColorIn1->Colors[CrntSlot - 1].Red == 0 &&
    +               ColorIn1->Colors[CrntSlot - 1].Green == 0 &&
    +               ColorIn1->Colors[CrntSlot - 1].Blue == 0) {
    +                CrntSlot--;
    +        }
    +
    +        /* Copy ColorIn2 to ColorUnion (use old colors if they exist): */
    +        for (i = 0; i < ColorIn2->ColorCount && CrntSlot <= 256; i++) {
    +                /* Let's see if this color already exists: */
    +                for (j = 0; j < ColorIn1->ColorCount; j++) {
    +                        if (memcmp(&ColorIn1->Colors[j], &ColorIn2->Colors[i],
    +                                   sizeof(GifColorType)) == 0) {
    +                                break;
    +                        }
    +                }
    +
    +                if (j < ColorIn1->ColorCount) {
    +                        ColorTransIn2[i] = j; /* color exists in Color1 */
    +                } else {
    +                        /* Color is new - copy it to a new slot: */
    +                        ColorUnion->Colors[CrntSlot] = ColorIn2->Colors[i];
    +                        ColorTransIn2[i] = CrntSlot++;
    +                }
    +        }
    +
    +        if (CrntSlot > 256) {
                     GifFreeMapObject(ColorUnion);
                     return ((ColorMapObject *)NULL);
    -            }
    -            ColorUnion->Colors = new_map;
             }
    -    }
     
    -    ColorUnion->ColorCount = RoundUpTo;
    -    ColorUnion->BitsPerPixel = NewGifBitSize;
    +        NewGifBitSize = GifBitSize(CrntSlot);
    +        RoundUpTo = (1 << NewGifBitSize);
     
    -    return (ColorUnion);
    +        if (RoundUpTo != ColorUnion->ColorCount) {
    +                register GifColorType *Map = ColorUnion->Colors;
    +
    +                /*
    +                 * Zero out slots up to next power of 2.
    +                 * We know these slots exist because of the way ColorUnion's
    +                 * start dimension was computed.
    +                 */
    +                for (j = CrntSlot; j < RoundUpTo; j++) {
    +                        Map[j].Red = Map[j].Green = Map[j].Blue = 0;
    +                }
    +
    +                /* perhaps we can shrink the map? */
    +                if (RoundUpTo < ColorUnion->ColorCount) {
    +                        GifColorType *new_map = (GifColorType *)reallocarray(
    +                            Map, RoundUpTo, sizeof(GifColorType));
    +                        if (new_map == NULL) {
    +                                GifFreeMapObject(ColorUnion);
    +                                return ((ColorMapObject *)NULL);
    +                        }
    +                        ColorUnion->Colors = new_map;
    +                }
    +        }
    +
    +        ColorUnion->ColorCount = RoundUpTo;
    +        ColorUnion->BitsPerPixel = NewGifBitSize;
    +
    +        return (ColorUnion);
     }
     
     /*******************************************************************************
      Apply a given color translation to the raster bits of an image
     *******************************************************************************/
     void GifApplyTranslation(SavedImage *Image, const GifPixelType Translation[]) {
    -    register int i;
    -    register int RasterSize =
    -        Image->ImageDesc.Height * Image->ImageDesc.Width;
    +        register int i;
    +        register int RasterSize =
    +            Image->ImageDesc.Height * Image->ImageDesc.Width;
     
    -    for (i = 0; i < RasterSize; i++) {
    -        Image->RasterBits[i] = Translation[Image->RasterBits[i]];
    -    }
    +        for (i = 0; i < RasterSize; i++) {
    +                Image->RasterBits[i] = Translation[Image->RasterBits[i]];
    +        }
     }
     
     /******************************************************************************
    @@ -249,56 +249,56 @@ void GifApplyTranslation(SavedImage *Image, const GifPixelType Translation[]) {
     int GifAddExtensionBlock(int *ExtensionBlockCount,
                              ExtensionBlock **ExtensionBlocks, int Function,
                              unsigned int Len, unsigned char ExtData[]) {
    -    ExtensionBlock *ep;
    +        ExtensionBlock *ep;
     
    -    if (*ExtensionBlocks == NULL) {
    -        *ExtensionBlocks =
    -            (ExtensionBlock *)malloc(sizeof(ExtensionBlock));
    -    } else {
    -        ExtensionBlock *ep_new = (ExtensionBlock *)reallocarray(
    -            *ExtensionBlocks, (*ExtensionBlockCount + 1),
    -            sizeof(ExtensionBlock));
    -        if (ep_new == NULL) {
    -            return (GIF_ERROR);
    +        if (*ExtensionBlocks == NULL) {
    +                *ExtensionBlocks =
    +                    (ExtensionBlock *)malloc(sizeof(ExtensionBlock));
    +        } else {
    +                ExtensionBlock *ep_new = (ExtensionBlock *)reallocarray(
    +                    *ExtensionBlocks, (*ExtensionBlockCount + 1),
    +                    sizeof(ExtensionBlock));
    +                if (ep_new == NULL) {
    +                        return (GIF_ERROR);
    +                }
    +                *ExtensionBlocks = ep_new;
             }
    -        *ExtensionBlocks = ep_new;
    -    }
     
    -    if (*ExtensionBlocks == NULL) {
    -        return (GIF_ERROR);
    -    }
    +        if (*ExtensionBlocks == NULL) {
    +                return (GIF_ERROR);
    +        }
     
    -    ep = &(*ExtensionBlocks)[(*ExtensionBlockCount)++];
    +        ep = &(*ExtensionBlocks)[(*ExtensionBlockCount)++];
     
    -    ep->Function = Function;
    -    ep->ByteCount = Len;
    -    ep->Bytes = (GifByteType *)malloc(ep->ByteCount);
    -    if (ep->Bytes == NULL) {
    -        return (GIF_ERROR);
    -    }
    +        ep->Function = Function;
    +        ep->ByteCount = Len;
    +        ep->Bytes = (GifByteType *)malloc(ep->ByteCount);
    +        if (ep->Bytes == NULL) {
    +                return (GIF_ERROR);
    +        }
     
    -    if (ExtData != NULL) {
    -        memcpy(ep->Bytes, ExtData, Len);
    -    }
    +        if (ExtData != NULL) {
    +                memcpy(ep->Bytes, ExtData, Len);
    +        }
     
    -    return (GIF_OK);
    +        return (GIF_OK);
     }
     
     void GifFreeExtensions(int *ExtensionBlockCount,
                            ExtensionBlock **ExtensionBlocks) {
    -    ExtensionBlock *ep;
    +        ExtensionBlock *ep;
     
    -    if (*ExtensionBlocks == NULL) {
    -        return;
    -    }
    +        if (*ExtensionBlocks == NULL) {
    +                return;
    +        }
     
    -    for (ep = *ExtensionBlocks;
    -         ep < (*ExtensionBlocks + *ExtensionBlockCount); ep++) {
    -        (void)free((char *)ep->Bytes);
    -    }
    -    (void)free((char *)*ExtensionBlocks);
    -    *ExtensionBlocks = NULL;
    -    *ExtensionBlockCount = 0;
    +        for (ep = *ExtensionBlocks;
    +             ep < (*ExtensionBlocks + *ExtensionBlockCount); ep++) {
    +                (void)free((char *)ep->Bytes);
    +        }
    +        (void)free((char *)*ExtensionBlocks);
    +        *ExtensionBlocks = NULL;
    +        *ExtensionBlockCount = 0;
     }
     
     /******************************************************************************
    @@ -309,37 +309,37 @@ void GifFreeExtensions(int *ExtensionBlockCount,
      * Frees the last image in the GifFile->SavedImages array
      */
     void FreeLastSavedImage(GifFileType *GifFile) {
    -    SavedImage *sp;
    +        SavedImage *sp;
     
    -    if ((GifFile == NULL) || (GifFile->SavedImages == NULL)) {
    -        return;
    -    }
    +        if ((GifFile == NULL) || (GifFile->SavedImages == NULL)) {
    +                return;
    +        }
     
    -    /* Remove one SavedImage from the GifFile */
    -    GifFile->ImageCount--;
    -    sp = &GifFile->SavedImages[GifFile->ImageCount];
    +        /* Remove one SavedImage from the GifFile */
    +        GifFile->ImageCount--;
    +        sp = &GifFile->SavedImages[GifFile->ImageCount];
     
    -    /* Deallocate its Colormap */
    -    if (sp->ImageDesc.ColorMap != NULL) {
    -        GifFreeMapObject(sp->ImageDesc.ColorMap);
    -        sp->ImageDesc.ColorMap = NULL;
    -    }
    +        /* Deallocate its Colormap */
    +        if (sp->ImageDesc.ColorMap != NULL) {
    +                GifFreeMapObject(sp->ImageDesc.ColorMap);
    +                sp->ImageDesc.ColorMap = NULL;
    +        }
     
    -    /* Deallocate the image data */
    -    if (sp->RasterBits != NULL) {
    -        free((char *)sp->RasterBits);
    -    }
    +        /* Deallocate the image data */
    +        if (sp->RasterBits != NULL) {
    +                free((char *)sp->RasterBits);
    +        }
     
    -    /* Deallocate any extensions */
    -    GifFreeExtensions(&sp->ExtensionBlockCount, &sp->ExtensionBlocks);
    +        /* Deallocate any extensions */
    +        GifFreeExtensions(&sp->ExtensionBlockCount, &sp->ExtensionBlocks);
     
    -    /*** FIXME: We could realloc the GifFile->SavedImages structure but is
    -     * there a point to it? Saves some memory but we'd have to do it every
    -     * time.  If this is used in GifFreeSavedImages then it would be
    -     * inefficient (The whole array is going to be deallocated.)  If we just
    -     * use it when we want to free the last Image it's convenient to do it
    -     * here.
    -     */
    +        /*** FIXME: We could realloc the GifFile->SavedImages structure but is
    +         * there a point to it? Saves some memory but we'd have to do it every
    +         * time.  If this is used in GifFreeSavedImages then it would be
    +         * inefficient (The whole array is going to be deallocated.)  If we just
    +         * use it when we want to free the last Image it's convenient to do it
    +         * here.
    +         */
     }
     
     /*
    @@ -347,103 +347,129 @@ void FreeLastSavedImage(GifFileType *GifFile) {
      */
     SavedImage *GifMakeSavedImage(GifFileType *GifFile,
                                   const SavedImage *CopyFrom) {
    -    // cppcheck-suppress ctunullpointer
    -    if (GifFile->SavedImages == NULL) {
    -        GifFile->SavedImages = (SavedImage *)malloc(sizeof(SavedImage));
    -    } else {
    -        SavedImage *newSavedImages = (SavedImage *)reallocarray(
    -            GifFile->SavedImages, (GifFile->ImageCount + 1),
    -            sizeof(SavedImage));
    -        if (newSavedImages == NULL) {
    -            return ((SavedImage *)NULL);
    -        }
    -        GifFile->SavedImages = newSavedImages;
    -    }
    -    if (GifFile->SavedImages == NULL) {
    -        return ((SavedImage *)NULL);
    -    } else {
    -        SavedImage *sp = &GifFile->SavedImages[GifFile->ImageCount++];
    -
    -        if (CopyFrom != NULL) {
    -            memcpy((char *)sp, CopyFrom, sizeof(SavedImage));
    -
    -            /*
    -             * Make our own allocated copies of the heap fields in
    -             * the copied record.  This guards against potential
    -             * aliasing problems.
    -             */
    -
    -            /* first, the local color map */
    -            if (CopyFrom->ImageDesc.ColorMap != NULL) {
    -                sp->ImageDesc.ColorMap = GifMakeMapObject(
    -                    CopyFrom->ImageDesc.ColorMap->ColorCount,
    -                    CopyFrom->ImageDesc.ColorMap->Colors);
    -                if (sp->ImageDesc.ColorMap == NULL) {
    -                    FreeLastSavedImage(GifFile);
    -                    return (SavedImage *)(NULL);
    -                }
    -            }
    -
    -            /* next, the raster */
    -            sp->RasterBits = (unsigned char *)reallocarray(
    -                NULL,
    -                (CopyFrom->ImageDesc.Height *
    -                 CopyFrom->ImageDesc.Width),
    -                sizeof(GifPixelType));
    -            if (sp->RasterBits == NULL) {
    -                FreeLastSavedImage(GifFile);
    -                return (SavedImage *)(NULL);
    -            }
    -            memcpy(sp->RasterBits, CopyFrom->RasterBits,
    -                   sizeof(GifPixelType) *
    -                       CopyFrom->ImageDesc.Height *
    -                       CopyFrom->ImageDesc.Width);
    -
    -            /* finally, the extension blocks */
    -            if (CopyFrom->ExtensionBlocks != NULL) {
    -                sp->ExtensionBlocks =
    -                    (ExtensionBlock *)reallocarray(
    -                        NULL, CopyFrom->ExtensionBlockCount,
    -                        sizeof(ExtensionBlock));
    -                if (sp->ExtensionBlocks == NULL) {
    -                    FreeLastSavedImage(GifFile);
    -                    return (SavedImage *)(NULL);
    -                }
    -                memcpy(sp->ExtensionBlocks,
    -                       CopyFrom->ExtensionBlocks,
    -                       sizeof(ExtensionBlock) *
    -                           CopyFrom->ExtensionBlockCount);
    -            }
    +        // cppcheck-suppress ctunullpointer
    +        if (GifFile->SavedImages == NULL) {
    +                GifFile->SavedImages = (SavedImage *)malloc(sizeof(SavedImage));
             } else {
    -            memset((char *)sp, '\0', sizeof(SavedImage));
    +                SavedImage *newSavedImages = (SavedImage *)reallocarray(
    +                    GifFile->SavedImages, (GifFile->ImageCount + 1),
    +                    sizeof(SavedImage));
    +                if (newSavedImages == NULL) {
    +                        return ((SavedImage *)NULL);
    +                }
    +                GifFile->SavedImages = newSavedImages;
             }
    +        if (GifFile->SavedImages == NULL) {
    +                return ((SavedImage *)NULL);
    +        } else {
    +                SavedImage *sp = &GifFile->SavedImages[GifFile->ImageCount++];
     
    -        return (sp);
    -    }
    +                if (CopyFrom != NULL) {
    +                        memcpy((char *)sp, CopyFrom, sizeof(SavedImage));
    +
    +                        /*
    +                         * Make our own allocated copies of the heap fields in
    +                         * the copied record.  This guards against potential
    +                         * aliasing problems.
    +                         */
    +
    +                        /* Null out aliased pointers before any allocations
    +                         * so that FreeLastSavedImage won't free CopyFrom's
    +                         * data if an allocation fails partway through. */
    +                        sp->ImageDesc.ColorMap = NULL;
    +                        sp->RasterBits = NULL;
    +                        sp->ExtensionBlocks = NULL;
    +                        sp->ExtensionBlockCount = 0;
    +
    +                        /* first, the local color map */
    +                        if (CopyFrom->ImageDesc.ColorMap != NULL) {
    +                                sp->ImageDesc.ColorMap = GifMakeMapObject(
    +                                    CopyFrom->ImageDesc.ColorMap->ColorCount,
    +                                    CopyFrom->ImageDesc.ColorMap->Colors);
    +                                if (sp->ImageDesc.ColorMap == NULL) {
    +                                        FreeLastSavedImage(GifFile);
    +                                        return (SavedImage *)(NULL);
    +                                }
    +                        }
    +
    +                        /* next, the raster */
    +                        sp->RasterBits = (unsigned char *)reallocarray(
    +                            NULL,
    +                            (CopyFrom->ImageDesc.Height *
    +                             CopyFrom->ImageDesc.Width),
    +                            sizeof(GifPixelType));
    +                        if (sp->RasterBits == NULL) {
    +                                FreeLastSavedImage(GifFile);
    +                                return (SavedImage *)(NULL);
    +                        }
    +                        memcpy(sp->RasterBits, CopyFrom->RasterBits,
    +                               sizeof(GifPixelType) *
    +                                   CopyFrom->ImageDesc.Height *
    +                                   CopyFrom->ImageDesc.Width);
    +
    +                        /* finally, the extension blocks */
    +                        if (CopyFrom->ExtensionBlocks != NULL) {
    +                                int k;
    +                                sp->ExtensionBlocks =
    +                                    (ExtensionBlock *)calloc(
    +                                        CopyFrom->ExtensionBlockCount,
    +                                        sizeof(ExtensionBlock));
    +                                if (sp->ExtensionBlocks == NULL) {
    +                                        FreeLastSavedImage(GifFile);
    +                                        return (SavedImage *)(NULL);
    +                                }
    +                                for (k = 0; k < CopyFrom->ExtensionBlockCount;
    +                                     k++) {
    +                                        ExtensionBlock *dst =
    +                                            &sp->ExtensionBlocks[k];
    +                                        ExtensionBlock *src =
    +                                            &CopyFrom->ExtensionBlocks[k];
    +                                        dst->Function = src->Function;
    +                                        dst->ByteCount = src->ByteCount;
    +                                        if (src->ByteCount > 0) {
    +                                                dst->Bytes =
    +                                                    (GifByteType *)malloc(
    +                                                        src->ByteCount);
    +                                                if (dst->Bytes == NULL) {
    +                                                        FreeLastSavedImage(
    +                                                            GifFile);
    +                                                        return (SavedImage *)(NULL);
    +                                                }
    +                                                memcpy(dst->Bytes, src->Bytes,
    +                                                       src->ByteCount);
    +                                        }
    +                                }
    +                        }
    +                } else {
    +                        memset((char *)sp, '\0', sizeof(SavedImage));
    +                }
    +
    +                return (sp);
    +        }
     }
     
     void GifFreeSavedImages(GifFileType *GifFile) {
    -    SavedImage *sp;
    +        SavedImage *sp;
     
    -    if ((GifFile == NULL) || (GifFile->SavedImages == NULL)) {
    -        return;
    -    }
    -    for (sp = GifFile->SavedImages;
    -         sp < GifFile->SavedImages + GifFile->ImageCount; sp++) {
    -        if (sp->ImageDesc.ColorMap != NULL) {
    -            GifFreeMapObject(sp->ImageDesc.ColorMap);
    -            sp->ImageDesc.ColorMap = NULL;
    +        if ((GifFile == NULL) || (GifFile->SavedImages == NULL)) {
    +                return;
             }
    +        for (sp = GifFile->SavedImages;
    +             sp < GifFile->SavedImages + GifFile->ImageCount; sp++) {
    +                if (sp->ImageDesc.ColorMap != NULL) {
    +                        GifFreeMapObject(sp->ImageDesc.ColorMap);
    +                        sp->ImageDesc.ColorMap = NULL;
    +                }
     
    -        if (sp->RasterBits != NULL) {
    -            free((char *)sp->RasterBits);
    +                if (sp->RasterBits != NULL) {
    +                        free((char *)sp->RasterBits);
    +                }
    +
    +                GifFreeExtensions(&sp->ExtensionBlockCount,
    +                                  &sp->ExtensionBlocks);
             }
    -
    -        GifFreeExtensions(&sp->ExtensionBlockCount,
    -                          &sp->ExtensionBlocks);
    -    }
    -    free((char *)GifFile->SavedImages);
    -    GifFile->SavedImages = NULL;
    +        free((char *)GifFile->SavedImages);
    +        GifFile->SavedImages = NULL;
     }
     
     /* end */
    diff --git a/src/java.desktop/share/native/libsplashscreen/giflib/openbsd-reallocarray.c b/src/java.desktop/share/native/libsplashscreen/giflib/openbsd-reallocarray.c
    index 7420af674c5..57504fceaa9 100644
    --- a/src/java.desktop/share/native/libsplashscreen/giflib/openbsd-reallocarray.c
    +++ b/src/java.desktop/share/native/libsplashscreen/giflib/openbsd-reallocarray.c
    @@ -22,9 +22,8 @@
      * questions.
      */
     
    -/*    $OpenBSD: reallocarray.c,v 1.1 2014/05/08 21:43:49 deraadt Exp $    */
     /*
    - * Copyright (c) 2008 Otto Moerbeek 
    + * SPDX-FileCopyrightText: Copyright (C) 2008 Otto Moerbeek 
      * SPDX-License-Identifier: MIT
      */
     
    @@ -44,55 +43,55 @@
     #define MUL_NO_OVERFLOW ((size_t)1 << (sizeof(size_t) * 4))
     
     void *openbsd_reallocarray(void *optr, size_t nmemb, size_t size) {
    -    if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
    -        nmemb > 0 && SIZE_MAX / nmemb < size) {
    -        errno = ENOMEM;
    -        return NULL;
    -    }
    -    /*
    -     * Head off variations in realloc behavior on different
    -     * platforms (reported by MarkR )
    -     *
    -     * The behaviour of reallocarray is implementation-defined if
    -     * nmemb or size is zero. It can return NULL or non-NULL
    -     * depending on the platform.
    -     * https://www.securecoding.cert.org/confluence/display/c/MEM04-C.Beware+of+zero-lengthallocations
    -     *
    -     * Here are some extracts from realloc man pages on different platforms.
    -     *
    -     * void realloc( void memblock, size_t size );
    -     *
    -     * Windows:
    -     *
    -     * If there is not enough available memory to expand the block
    -     * to the given size, the original block is left unchanged,
    -     * and NULL is returned.  If size is zero, then the block
    -     * pointed to by memblock is freed; the return value is NULL,
    -     * and memblock is left pointing at a freed block.
    -     *
    -     * OpenBSD:
    -     *
    -     * If size or nmemb is equal to 0, a unique pointer to an
    -     * access protected, zero sized object is returned. Access via
    -     * this pointer will generate a SIGSEGV exception.
    -     *
    -     * Linux:
    -     *
    -     * If size was equal to 0, either NULL or a pointer suitable
    -     * to be passed to free() is returned.
    -     *
    -     * OS X:
    -     *
    -     * If size is zero and ptr is not NULL, a new, minimum sized
    -     * object is allocated and the original object is freed.
    -     *
    -     * It looks like images with zero width or height can trigger
    -     * this, and fuzzing behaviour will differ by platform, so
    -     * fuzzing on one platform may not detect zero-size allocation
    -     * problems on other platforms.
    -     */
    -    if (size == 0 || nmemb == 0) {
    -        return NULL;
    -    }
    -    return realloc(optr, size * nmemb);
    +        if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
    +            nmemb > 0 && SIZE_MAX / nmemb < size) {
    +                errno = ENOMEM;
    +                return NULL;
    +        }
    +        /*
    +         * Head off variations in realloc behavior on different
    +         * platforms (reported by MarkR )
    +         *
    +         * The behaviour of reallocarray is implementation-defined if
    +         * nmemb or size is zero. It can return NULL or non-NULL
    +         * depending on the platform.
    +         * https://www.securecoding.cert.org/confluence/display/c/MEM04-C.Beware+of+zero-lengthallocations
    +         *
    +         * Here are some extracts from realloc man pages on different platforms.
    +         *
    +         * void realloc( void memblock, size_t size );
    +         *
    +         * Windows:
    +         *
    +         * If there is not enough available memory to expand the block
    +         * to the given size, the original block is left unchanged,
    +         * and NULL is returned.  If size is zero, then the block
    +         * pointed to by memblock is freed; the return value is NULL,
    +         * and memblock is left pointing at a freed block.
    +         *
    +         * OpenBSD:
    +         *
    +         * If size or nmemb is equal to 0, a unique pointer to an
    +         * access protected, zero sized object is returned. Access via
    +         * this pointer will generate a SIGSEGV exception.
    +         *
    +         * Linux:
    +         *
    +         * If size was equal to 0, either NULL or a pointer suitable
    +         * to be passed to free() is returned.
    +         *
    +         * OS X:
    +         *
    +         * If size is zero and ptr is not NULL, a new, minimum sized
    +         * object is allocated and the original object is freed.
    +         *
    +         * It looks like images with zero width or height can trigger
    +         * this, and fuzzing behaviour will differ by platform, so
    +         * fuzzing on one platform may not detect zero-size allocation
    +         * problems on other platforms.
    +         */
    +        if (size == 0 || nmemb == 0) {
    +                return NULL;
    +        }
    +        return realloc(optr, size * nmemb);
     }
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/CHANGES b/src/java.desktop/share/native/libsplashscreen/libpng/CHANGES
    index af9fcff6eb3..ba81df0c0e6 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/CHANGES
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/CHANGES
    @@ -6337,6 +6337,48 @@ Version 1.6.55 [February 9, 2026]
       Resolved an oss-fuzz build issue involving nalloc.
         (Contributed by Philippe Antoine.)
     
    +Version 1.6.56 [March 25, 2026]
    +  Fixed CVE-2026-33416 (high severity):
    +    Use-after-free via pointer aliasing in `png_set_tRNS` and `png_set_PLTE`.
    +    (Reported by Halil Oktay and Ryo Shimada;
    +    fixed by Halil Oktay and Cosmin Truta.)
    +  Fixed CVE-2026-33636 (high severity):
    +    Out-of-bounds read/write in the palette expansion on ARM Neon.
    +    (Reported by Taegu Ha; fixed by Taegu Ha and Cosmin Truta.)
    +  Fixed uninitialized reads beyond `num_trans` in `trans_alpha` buffers.
    +    (Contributed by Halil Oktay.)
    +  Fixed stale `info_ptr->palette` after in-place gamma and background
    +    transforms.
    +  Fixed wrong channel indices in `png_image_read_and_map` RGB_ALPHA path.
    +    (Contributed by Yuelin Wang.)
    +  Fixed wrong background color in colormap read.
    +    (Contributed by Yuelin Wang.)
    +  Fixed dead loop in sPLT write.
    +    (Contributed by Yuelin Wang.)
    +  Added missing null pointer checks in four public API functions.
    +    (Contributed by Yuelin Wang.)
    +  Validated shift bit depths in `png_set_shift` to prevent infinite loop.
    +    (Contributed by Yuelin Wang.)
    +  Avoided undefined behavior in library and tests.
    +  Deprecated the hardly-ever-tested POINTER_INDEXING config option.
    +  Added negative-stride test coverage for the simplified API.
    +  Fixed memory leaks and API misuse in oss-fuzz.
    +    (Contributed by Owen Sanzas.)
    +  Implemented various fixes and improvements in oss-fuzz.
    +    (Contributed by Bob Friesenhahn and Philippe Antoine.)
    +  Performed various refactorings and cleanups.
    +
    +Version 1.6.57 [April 8, 2026]
    +  Fixed CVE-2026-34757 (medium severity):
    +    Use-after-free in `png_set_PLTE`, `png_set_tRNS` and `png_set_hIST`
    +    leading to corrupted chunk data and potential heap information disclosure.
    +    Also hardened the append-style setters (`png_set_text`, `png_set_sPLT`,
    +    `png_set_unknown_chunks`) against a theoretical variant of the same
    +    aliasing pattern.
    +    (Reported by Iv4n .)
    +  Fixed integer overflow in rowbytes computation in read transforms.
    +    (Contributed by Mohammad Seet.)
    +
     Send comments/corrections/commendations to png-mng-implement at lists.sf.net.
     Subscription is required; visit
     
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/README b/src/java.desktop/share/native/libsplashscreen/libpng/README
    index 6e0d1e33137..179b8dc8cb4 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/README
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/README
    @@ -1,4 +1,4 @@
    -README for libpng version 1.6.55
    +README for libpng version 1.6.57
     ================================
     
     See the note about version numbers near the top of `png.h`.
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/png.c b/src/java.desktop/share/native/libsplashscreen/libpng/png.c
    index 955fda8dd7e..e4e13b0a684 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/png.c
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/png.c
    @@ -42,7 +42,7 @@
     #include "pngpriv.h"
     
     /* Generate a compiler error if there is an old png.h in the search path. */
    -typedef png_libpng_version_1_6_55 Your_png_h_is_not_version_1_6_55;
    +typedef png_libpng_version_1_6_57 Your_png_h_is_not_version_1_6_57;
     
     /* Sanity check the chunks definitions - PNG_KNOWN_CHUNKS from pngpriv.h and the
      * corresponding macro definitions.  This causes a compile time failure if
    @@ -849,7 +849,7 @@ png_get_copyright(png_const_structrp png_ptr)
        return PNG_STRING_COPYRIGHT
     #else
        return PNG_STRING_NEWLINE \
    -      "libpng version 1.6.55" PNG_STRING_NEWLINE \
    +      "libpng version 1.6.57" PNG_STRING_NEWLINE \
           "Copyright (c) 2018-2026 Cosmin Truta" PNG_STRING_NEWLINE \
           "Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson" \
           PNG_STRING_NEWLINE \
    @@ -1199,7 +1199,7 @@ png_xy_from_XYZ(png_xy *xy, const png_XYZ *XYZ)
           return 1;
     
        /* The reference white is simply the sum of the end-point (X,Y,Z) vectors so
    -    * the fillowing calculates (X+Y+Z) of the reference white (media white,
    +    * the following calculates (X+Y+Z) of the reference white (media white,
         * encoding white) itself:
         */
        d = dblue;
    @@ -1244,9 +1244,9 @@ png_XYZ_from_xy(png_XYZ *XYZ, const png_xy *xy)
         * (-0.0770) because the PNG spec itself requires the xy values to be
         * unsigned.  whitey is also required to be 5 or more to avoid overflow.
         *
    -    * Instead the upper limits have been relaxed to accomodate ACES AP1 where
    +    * Instead the upper limits have been relaxed to accommodate ACES AP1 where
         * redz ends up as -600 (-0.006).  ProPhotoRGB was already "in range."
    -    * The new limit accomodates the AP0 and AP1 ranges for z but not AP0 redy.
    +    * The new limit accommodates the AP0 and AP1 ranges for z but not AP0 redy.
         */
        const png_fixed_point fpLimit = PNG_FP_1+(PNG_FP_1/10);
        if (xy->redx   < 0 || xy->redx > fpLimit) return 1;
    @@ -1357,7 +1357,7 @@ png_XYZ_from_xy(png_XYZ *XYZ, const png_xy *xy)
         *    red-scale + green-scale + blue-scale = 1/white-y = white-scale
         *
         * So now we have a Cramer's rule solution where the determinants are just
    -    * 3x3 - far more tractible.  Unfortunately 3x3 determinants still involve
    +    * 3x3 - far more tractable.  Unfortunately 3x3 determinants still involve
         * multiplication of three coefficients so we can't guarantee to avoid
         * overflow in the libpng fixed point representation.  Using Cramer's rule in
         * floating point is probably a good choice here, but it's not an option for
    @@ -1726,7 +1726,7 @@ png_icc_check_header(png_const_structrp png_ptr, png_const_charp name,
         * into R, G and B channels.
         *
         * Previously it was suggested that an RGB profile on grayscale data could be
    -    * handled.  However it it is clear that using an RGB profile in this context
    +    * handled.  However it is clear that using an RGB profile in this context
         * must be an error - there is no specification of what it means.  Thus it is
         * almost certainly more correct to ignore the profile.
         */
    @@ -2944,7 +2944,7 @@ png_gamma_significant(png_fixed_point gamma_val)
         *
         *    2.2/(2+51/256) == 1.00035524
         *
    -    * I.e. vanishly small (<4E-4) but still detectable in 16-bit linear (+/-
    +    * I.e. vanishingly small (<4E-4) but still detectable in 16-bit linear (+/-
         * 23).  Note that the Adobe choice seems to be something intended to give an
         * exact number with 8 binary fractional digits - it is the closest to 2.2
         * that is possible a base 2 .8p representation.
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/png.h b/src/java.desktop/share/native/libsplashscreen/libpng/png.h
    index e95c0444399..349e7d07383 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/png.h
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/png.h
    @@ -29,7 +29,7 @@
      * However, the following notice accompanied the original version of this
      * file and, per its terms, should not be removed:
      *
    - * libpng version 1.6.55
    + * libpng version 1.6.57
      *
      * Copyright (c) 2018-2026 Cosmin Truta
      * Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
    @@ -43,7 +43,7 @@
      *   libpng versions 0.89, June 1996, through 0.96, May 1997: Andreas Dilger
      *   libpng versions 0.97, January 1998, through 1.6.35, July 2018:
      *     Glenn Randers-Pehrson
    - *   libpng versions 1.6.36, December 2018, through 1.6.55, February 2026:
    + *   libpng versions 1.6.36, December 2018, through 1.6.57, April 2026:
      *     Cosmin Truta
      *   See also "Contributing Authors", below.
      */
    @@ -267,7 +267,7 @@
      *    ...
      *    1.5.30                  15    10530  15.so.15.30[.0]
      *    ...
    - *    1.6.55                  16    10655  16.so.16.55[.0]
    + *    1.6.57                  16    10657  16.so.16.57[.0]
      *
      *    Henceforth the source version will match the shared-library major and
      *    minor numbers; the shared-library major version number will be used for
    @@ -303,7 +303,7 @@
      */
     
     /* Version information for png.h - this should match the version in png.c */
    -#define PNG_LIBPNG_VER_STRING "1.6.55"
    +#define PNG_LIBPNG_VER_STRING "1.6.57"
     #define PNG_HEADER_VERSION_STRING " libpng version " PNG_LIBPNG_VER_STRING "\n"
     
     /* The versions of shared library builds should stay in sync, going forward */
    @@ -314,7 +314,7 @@
     /* These should match the first 3 components of PNG_LIBPNG_VER_STRING: */
     #define PNG_LIBPNG_VER_MAJOR   1
     #define PNG_LIBPNG_VER_MINOR   6
    -#define PNG_LIBPNG_VER_RELEASE 55
    +#define PNG_LIBPNG_VER_RELEASE 57
     
     /* This should be zero for a public release, or non-zero for a
      * development version.
    @@ -345,7 +345,7 @@
      * From version 1.0.1 it is:
      * XXYYZZ, where XX=major, YY=minor, ZZ=release
      */
    -#define PNG_LIBPNG_VER 10655 /* 1.6.55 */
    +#define PNG_LIBPNG_VER 10657 /* 1.6.57 */
     
     /* Library configuration: these options cannot be changed after
      * the library has been built.
    @@ -455,7 +455,7 @@ extern "C" {
     /* This triggers a compiler error in png.c, if png.c and png.h
      * do not agree upon the version number.
      */
    -typedef char *png_libpng_version_1_6_55;
    +typedef char *png_libpng_version_1_6_57;
     
     /* Basic control structions.  Read libpng-manual.txt or libpng.3 for more info.
      *
    @@ -2370,7 +2370,7 @@ PNG_EXPORT(162, int, png_get_text,
     #endif
     
     /* Note while png_set_text() will accept a structure whose text,
    - * language, and  translated keywords are NULL pointers, the structure
    + * language, and translated keywords are NULL pointers, the structure
      * returned by png_get_text will always contain regular
      * zero-terminated C strings.  They might be empty strings but
      * they will never be NULL pointers.
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngconf.h b/src/java.desktop/share/native/libsplashscreen/libpng/pngconf.h
    index b957f8b5061..1a5bb7b60f8 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngconf.h
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngconf.h
    @@ -29,7 +29,7 @@
      * However, the following notice accompanied the original version of this
      * file and, per its terms, should not be removed:
      *
    - * libpng version 1.6.55
    + * libpng version 1.6.57
      *
      * Copyright (c) 2018-2026 Cosmin Truta
      * Copyright (c) 1998-2002,2004,2006-2016,2018 Glenn Randers-Pehrson
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pnglibconf.h b/src/java.desktop/share/native/libsplashscreen/libpng/pnglibconf.h
    index ae1ab462072..de63c998927 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pnglibconf.h
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pnglibconf.h
    @@ -31,7 +31,7 @@
      * However, the following notice accompanied the original version of this
      * file and, per its terms, should not be removed:
      */
    -/* libpng version 1.6.55 */
    +/* libpng version 1.6.57 */
     
     /* Copyright (c) 2018-2026 Cosmin Truta */
     /* Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson */
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngpriv.h b/src/java.desktop/share/native/libsplashscreen/libpng/pngpriv.h
    index ee91f58d4ba..086a2f76ee6 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngpriv.h
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngpriv.h
    @@ -986,7 +986,7 @@
      *
      * At present these index values are not exported (not part of the public API)
      * so can be changed at will.  For convenience the names are in lexical sort
    - * order but with the critical chunks at the start in the order of occurence in
    + * order but with the critical chunks at the start in the order of occurrence in
      * a PNG.
      *
      * PNG_INFO_ values do not exist for every one of these chunk handles; for
    @@ -2115,7 +2115,7 @@ PNG_INTERNAL_FUNCTION(void, png_ascii_from_fixed,
      * not valid it will be the index of a character in the supposed number.
      *
      * The format of a number is defined in the PNG extensions specification
    - * and this API is strictly conformant to that spec, not anyone elses!
    + * and this API is strictly conformant to that spec, not anyone else's!
      *
      * The format as a regular expression is:
      *
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c
    index 79fd9ad6a82..70df18926f5 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c
    @@ -720,7 +720,7 @@ png_read_end(png_structrp png_ptr, png_inforp info_ptr)
           png_read_finish_IDAT(png_ptr);
     
     #ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED
    -   /* Report invalid palette index; added at libng-1.5.10 */
    +   /* Report invalid palette index; added at libpng-1.5.10 */
        if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE &&
            png_ptr->num_palette_max >= png_ptr->num_palette)
           png_benign_error(png_ptr, "Read palette index exceeding num_palette");
    @@ -808,21 +808,19 @@ png_read_destroy(png_structrp png_ptr)
        png_ptr->quantize_index = NULL;
     #endif
     
    -   if ((png_ptr->free_me & PNG_FREE_PLTE) != 0)
    -   {
    -      png_zfree(png_ptr, png_ptr->palette);
    -      png_ptr->palette = NULL;
    -   }
    -   png_ptr->free_me &= ~PNG_FREE_PLTE;
    +   /* png_ptr->palette is always independently allocated (not aliased
    +    * with info_ptr->palette), so free it unconditionally.
    +    */
    +   png_free(png_ptr, png_ptr->palette);
    +   png_ptr->palette = NULL;
     
     #if defined(PNG_tRNS_SUPPORTED) || \
         defined(PNG_READ_EXPAND_SUPPORTED) || defined(PNG_READ_BACKGROUND_SUPPORTED)
    -   if ((png_ptr->free_me & PNG_FREE_TRNS) != 0)
    -   {
    -      png_free(png_ptr, png_ptr->trans_alpha);
    -      png_ptr->trans_alpha = NULL;
    -   }
    -   png_ptr->free_me &= ~PNG_FREE_TRNS;
    +   /* png_ptr->trans_alpha is always independently allocated (not aliased
    +    * with info_ptr->trans_alpha), so free it unconditionally.
    +    */
    +   png_free(png_ptr, png_ptr->trans_alpha);
    +   png_ptr->trans_alpha = NULL;
     #endif
     
        inflateEnd(&png_ptr->zstream);
    @@ -1285,7 +1283,7 @@ png_image_is_not_sRGB(png_const_structrp png_ptr)
         * png_struct::chromaticities always exists since the simplified API
         * requires rgb-to-gray.  The mDCV, cICP and cHRM chunks may all set it to
         * a non-sRGB value, so it needs to be checked but **only** if one of
    -    * those chunks occured in the file.
    +    * those chunks occurred in the file.
         */
        /* Highest priority: check to be safe. */
        if (png_has_chunk(png_ptr, cICP) || png_has_chunk(png_ptr, mDCV))
    @@ -2625,7 +2623,7 @@ png_image_read_colormap(png_voidp argument)
                       {
                          r = back_r;
                          g = back_g;
    -                     b = back_g;
    +                     b = back_b;
                       }
     
                       /* Compare the newly-created color-map entry with the one the
    @@ -2903,9 +2901,9 @@ png_image_read_and_map(png_voidp argument)
              {
                 png_bytep inrow = png_voidcast(png_bytep, display->local_row);
                 png_bytep outrow = first_row + y * row_step;
    -            png_const_bytep end_row = outrow + width;
    +            png_const_bytep row_end = outrow + width;
     
    -            /* Read read the libpng data into the temporary buffer. */
    +            /* Read the libpng data into the temporary buffer. */
                 png_read_row(png_ptr, inrow, NULL);
     
                 /* Now process the row according to the processing option, note
    @@ -2916,7 +2914,7 @@ png_image_read_and_map(png_voidp argument)
                 switch (proc)
                 {
                    case PNG_CMAP_GA:
    -                  for (; outrow < end_row; outrow += stepx)
    +                  for (; outrow < row_end; outrow += stepx)
                       {
                          /* The data is always in the PNG order */
                          unsigned int gray = *inrow++;
    @@ -2945,7 +2943,7 @@ png_image_read_and_map(png_voidp argument)
                       break;
     
                    case PNG_CMAP_TRANS:
    -                  for (; outrow < end_row; outrow += stepx)
    +                  for (; outrow < row_end; outrow += stepx)
                       {
                          png_byte gray = *inrow++;
                          png_byte alpha = *inrow++;
    @@ -2962,7 +2960,7 @@ png_image_read_and_map(png_voidp argument)
                       break;
     
                    case PNG_CMAP_RGB:
    -                  for (; outrow < end_row; outrow += stepx)
    +                  for (; outrow < row_end; outrow += stepx)
                       {
                          *outrow = PNG_RGB_INDEX(inrow[0], inrow[1], inrow[2]);
                          inrow += 3;
    @@ -2970,7 +2968,7 @@ png_image_read_and_map(png_voidp argument)
                       break;
     
                    case PNG_CMAP_RGB_ALPHA:
    -                  for (; outrow < end_row; outrow += stepx)
    +                  for (; outrow < row_end; outrow += stepx)
                       {
                          unsigned int alpha = inrow[3];
     
    @@ -3007,10 +3005,10 @@ png_image_read_and_map(png_voidp argument)
                              */
                             if (inrow[0] & 0x80) back_i += 9; /* red */
                             if (inrow[0] & 0x40) back_i += 9;
    -                        if (inrow[0] & 0x80) back_i += 3; /* green */
    -                        if (inrow[0] & 0x40) back_i += 3;
    -                        if (inrow[0] & 0x80) back_i += 1; /* blue */
    -                        if (inrow[0] & 0x40) back_i += 1;
    +                        if (inrow[1] & 0x80) back_i += 3; /* green */
    +                        if (inrow[1] & 0x40) back_i += 3;
    +                        if (inrow[2] & 0x80) back_i += 1; /* blue */
    +                        if (inrow[2] & 0x40) back_i += 1;
     
                             *outrow = (png_byte)back_i;
                          }
    @@ -3277,18 +3275,18 @@ png_image_read_composite(png_voidp argument)
              {
                 png_bytep inrow = png_voidcast(png_bytep, display->local_row);
                 png_bytep outrow;
    -            png_const_bytep end_row;
    +            png_const_bytep row_end;
     
                 /* Read the row, which is packed: */
                 png_read_row(png_ptr, inrow, NULL);
     
                 outrow = png_voidcast(png_bytep, display->first_row);
                 outrow += y * row_step;
    -            end_row = outrow + width * channels;
    +            row_end = outrow + width * channels;
     
                 /* Now do the composition on each pixel in this row. */
                 outrow += startx;
    -            for (; outrow < end_row; outrow += stepx)
    +            for (; outrow < row_end; outrow += stepx)
                 {
                    png_byte alpha = inrow[channels];
     
    @@ -3461,14 +3459,14 @@ png_image_read_background(png_voidp argument)
                          png_bytep inrow = png_voidcast(png_bytep,
                              display->local_row);
                          png_bytep outrow = first_row + y * row_step;
    -                     png_const_bytep end_row = outrow + width;
    +                     png_const_bytep row_end = outrow + width;
     
                          /* Read the row, which is packed: */
                          png_read_row(png_ptr, inrow, NULL);
     
                          /* Now do the composition on each pixel in this row. */
                          outrow += startx;
    -                     for (; outrow < end_row; outrow += stepx)
    +                     for (; outrow < row_end; outrow += stepx)
                          {
                             png_byte alpha = inrow[1];
     
    @@ -3506,14 +3504,14 @@ png_image_read_background(png_voidp argument)
                          png_bytep inrow = png_voidcast(png_bytep,
                              display->local_row);
                          png_bytep outrow = first_row + y * row_step;
    -                     png_const_bytep end_row = outrow + width;
    +                     png_const_bytep row_end = outrow + width;
     
                          /* Read the row, which is packed: */
                          png_read_row(png_ptr, inrow, NULL);
     
                          /* Now do the composition on each pixel in this row. */
                          outrow += startx;
    -                     for (; outrow < end_row; outrow += stepx)
    +                     for (; outrow < row_end; outrow += stepx)
                          {
                             png_byte alpha = inrow[1];
     
    @@ -3596,7 +3594,7 @@ png_image_read_background(png_voidp argument)
                    {
                       png_const_uint_16p inrow;
                       png_uint_16p outrow = first_row + y * row_step;
    -                  png_uint_16p end_row = outrow + width * outchannels;
    +                  png_uint_16p row_end = outrow + width * outchannels;
     
                       /* Read the row, which is packed: */
                       png_read_row(png_ptr, png_voidcast(png_bytep,
    @@ -3606,7 +3604,7 @@ png_image_read_background(png_voidp argument)
                       /* Now do the pre-multiplication on each pixel in this row.
                        */
                       outrow += startx;
    -                  for (; outrow < end_row; outrow += stepx)
    +                  for (; outrow < row_end; outrow += stepx)
                       {
                          png_uint_32 component = inrow[0];
                          png_uint_16 alpha = inrow[1];
    @@ -4142,7 +4140,7 @@ png_image_finish_read(png_imagep image, png_const_colorp background,
                 row_stride = (png_int_32)/*SAFE*/png_row_stride;
     
              if (row_stride < 0)
    -            check = (png_uint_32)(-row_stride);
    +            check = -(png_uint_32)row_stride;
     
              else
                 check = (png_uint_32)row_stride;
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c
    index fcce80da1cb..838c8460f91 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c
    @@ -259,7 +259,7 @@ png_set_strip_alpha(png_structrp png_ptr)
      *
      * Terminology (assuming power law, "gamma", encodings):
      *    "screen" gamma: a power law imposed by the output device when digital
    - *    samples are converted to visible light output.  The EOTF - volage to
    + *    samples are converted to visible light output.  The EOTF - voltage to
      *    luminance on output.
      *
      *    "file" gamma: a power law used to encode luminance levels from the input
    @@ -524,6 +524,9 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette,
        if (png_rtran_ok(png_ptr, 0) == 0)
           return;
     
    +   if (palette == NULL)
    +      return;
    +
        png_ptr->transformations |= PNG_QUANTIZE;
     
        if (full_quantize == 0)
    @@ -840,7 +843,13 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette,
        }
        if (png_ptr->palette == NULL)
        {
    -      png_ptr->palette = palette;
    +      /* Allocate an owned copy rather than aliasing the caller's pointer,
    +       * so that png_read_destroy can free png_ptr->palette unconditionally.
    +       */
    +      png_ptr->palette = png_voidcast(png_colorp, png_calloc(png_ptr,
    +          PNG_MAX_PALETTE_LENGTH * (sizeof (png_color))));
    +      memcpy(png_ptr->palette, palette, (unsigned int)num_palette *
    +          (sizeof (png_color)));
        }
        png_ptr->num_palette = (png_uint_16)num_palette;
     
    @@ -1393,7 +1402,7 @@ png_resolve_file_gamma(png_const_structrp png_ptr)
        if (file_gamma != 0)
           return file_gamma;
     
    -   /* If png_reciprocal oveflows it returns 0 which indicates to the caller that
    +   /* If png_reciprocal overflows, it returns 0, indicating to the caller that
         * there is no usable file gamma.  (The checks added to png_set_gamma and
         * png_set_alpha_mode should prevent a screen_gamma which would overflow.)
         */
    @@ -2090,6 +2099,21 @@ png_read_transform_info(png_structrp png_ptr, png_inforp info_ptr)
     {
        png_debug(1, "in png_read_transform_info");
     
    +   if (png_ptr->transformations != 0)
    +   {
    +      if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE &&
    +          info_ptr->palette != NULL && png_ptr->palette != NULL)
    +      {
    +         /* Sync info_ptr->palette with png_ptr->palette.
    +          * The function png_init_read_transformations may have modified
    +          * png_ptr->palette in place (e.g. for gamma correction or for
    +          * background compositing).
    +          */
    +         memcpy(info_ptr->palette, png_ptr->palette,
    +             PNG_MAX_PALETTE_LENGTH * (sizeof (png_color)));
    +      }
    +   }
    +
     #ifdef PNG_READ_EXPAND_SUPPORTED
        if ((png_ptr->transformations & PNG_EXPAND) != 0)
        {
    @@ -2384,7 +2408,7 @@ png_do_unpack(png_row_infop row_info, png_bytep row)
           }
           row_info->bit_depth = 8;
           row_info->pixel_depth = (png_byte)(8 * row_info->channels);
    -      row_info->rowbytes = row_width * row_info->channels;
    +      row_info->rowbytes = (size_t)row_width * row_info->channels;
        }
     }
     #endif
    @@ -2586,7 +2610,7 @@ png_do_scale_16_to_8(png_row_infop row_info, png_bytep row)
     
           row_info->bit_depth = 8;
           row_info->pixel_depth = (png_byte)(8 * row_info->channels);
    -      row_info->rowbytes = row_info->width * row_info->channels;
    +      row_info->rowbytes = (size_t)row_info->width * row_info->channels;
        }
     }
     #endif
    @@ -2614,7 +2638,7 @@ png_do_chop(png_row_infop row_info, png_bytep row)
     
           row_info->bit_depth = 8;
           row_info->pixel_depth = (png_byte)(8 * row_info->channels);
    -      row_info->rowbytes = row_info->width * row_info->channels;
    +      row_info->rowbytes = (size_t)row_info->width * row_info->channels;
        }
     }
     #endif
    @@ -2850,7 +2874,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 *(--dp) = lo_filler;
                 row_info->channels = 2;
                 row_info->pixel_depth = 16;
    -            row_info->rowbytes = row_width * 2;
    +            row_info->rowbytes = (size_t)row_width * 2;
              }
     
              else
    @@ -2865,7 +2889,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 }
                 row_info->channels = 2;
                 row_info->pixel_depth = 16;
    -            row_info->rowbytes = row_width * 2;
    +            row_info->rowbytes = (size_t)row_width * 2;
              }
           }
     
    @@ -2888,7 +2912,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 *(--dp) = hi_filler;
                 row_info->channels = 2;
                 row_info->pixel_depth = 32;
    -            row_info->rowbytes = row_width * 4;
    +            row_info->rowbytes = (size_t)row_width * 4;
              }
     
              else
    @@ -2905,7 +2929,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 }
                 row_info->channels = 2;
                 row_info->pixel_depth = 32;
    -            row_info->rowbytes = row_width * 4;
    +            row_info->rowbytes = (size_t)row_width * 4;
              }
           }
     #endif
    @@ -2929,7 +2953,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 *(--dp) = lo_filler;
                 row_info->channels = 4;
                 row_info->pixel_depth = 32;
    -            row_info->rowbytes = row_width * 4;
    +            row_info->rowbytes = (size_t)row_width * 4;
              }
     
              else
    @@ -2946,7 +2970,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 }
                 row_info->channels = 4;
                 row_info->pixel_depth = 32;
    -            row_info->rowbytes = row_width * 4;
    +            row_info->rowbytes = (size_t)row_width * 4;
              }
           }
     
    @@ -2973,7 +2997,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
                 *(--dp) = hi_filler;
                 row_info->channels = 4;
                 row_info->pixel_depth = 64;
    -            row_info->rowbytes = row_width * 8;
    +            row_info->rowbytes = (size_t)row_width * 8;
              }
     
              else
    @@ -2995,7 +3019,7 @@ png_do_read_filler(png_row_infop row_info, png_bytep row,
     
                 row_info->channels = 4;
                 row_info->pixel_depth = 64;
    -            row_info->rowbytes = row_width * 8;
    +            row_info->rowbytes = (size_t)row_width * 8;
              }
           }
     #endif
    @@ -4489,7 +4513,7 @@ png_do_expand_palette(png_structrp png_ptr, png_row_infop row_info,
                    }
                    row_info->bit_depth = 8;
                    row_info->pixel_depth = 32;
    -               row_info->rowbytes = row_width * 4;
    +               row_info->rowbytes = (size_t)row_width * 4;
                    row_info->color_type = 6;
                    row_info->channels = 4;
                 }
    @@ -4497,7 +4521,7 @@ png_do_expand_palette(png_structrp png_ptr, png_row_infop row_info,
                 else
                 {
                    sp = row + (size_t)row_width - 1;
    -               dp = row + (size_t)(row_width * 3) - 1;
    +               dp = row + (size_t)row_width * 3 - 1;
                    i = 0;
     #ifdef PNG_ARM_NEON_INTRINSICS_AVAILABLE
                    i = png_do_expand_palette_rgb8_neon(png_ptr, row_info, row,
    @@ -4516,7 +4540,7 @@ png_do_expand_palette(png_structrp png_ptr, png_row_infop row_info,
     
                    row_info->bit_depth = 8;
                    row_info->pixel_depth = 24;
    -               row_info->rowbytes = row_width * 3;
    +               row_info->rowbytes = (size_t)row_width * 3;
                    row_info->color_type = 2;
                    row_info->channels = 3;
                 }
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngrutil.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngrutil.c
    index 01bb0c8bedc..4712dfd418a 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngrutil.c
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngrutil.c
    @@ -465,7 +465,7 @@ png_inflate_claim(png_structrp png_ptr, png_uint_32 owner)
         * be gained by using this when it is known *if* the zlib stream itself does
         * not record the number; however, this is an illusion: the original writer
         * of the PNG may have selected a lower window size, and we really must
    -    * follow that because, for systems with with limited capabilities, we
    +    * follow that because, for systems with limited capabilities, we
         * would otherwise reject the application's attempts to use a smaller window
         * size (zlib doesn't have an interface to say "this or lower"!).
         *
    @@ -1035,7 +1035,7 @@ png_handle_PLTE(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
         * in the case of an 8-bit display with a decoder which controls the palette.
         *
         * The alternative here is to ignore the error and store the palette anyway;
    -    * destroying the tRNS will definately cause problems.
    +    * destroying the tRNS will definitely cause problems.
         *
         * NOTE: the case of PNG_COLOR_TYPE_PALETTE need not be considered because
         * the png_handle_ routines for the three 'after PLTE' chunks tRNS, bKGD and
    @@ -1082,19 +1082,6 @@ png_handle_PLTE(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
           /* A valid PLTE chunk has been read */
           png_ptr->mode |= PNG_HAVE_PLTE;
     
    -      /* TODO: png_set_PLTE has the side effect of setting png_ptr->palette to
    -       * its own copy of the palette.  This has the side effect that when
    -       * png_start_row is called (this happens after any call to
    -       * png_read_update_info) the info_ptr palette gets changed.  This is
    -       * extremely unexpected and confusing.
    -       *
    -       * REVIEW: there have been consistent bugs in the past about gamma and
    -       * similar transforms to colour mapped images being useless because the
    -       * modified palette cannot be accessed because of the above.
    -       *
    -       * CONSIDER: Fix this by not sharing the palette in this way.  But does
    -       * this completely fix the problem?
    -       */
           png_set_PLTE(png_ptr, info_ptr, palette, num);
           return handled_ok;
        }
    @@ -1296,7 +1283,7 @@ png_handle_cHRM(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
     
        /* png_set_cHRM may complain about some of the values but this doesn't matter
         * because it was a cHRM and it did have vaguely (if, perhaps, ridiculous)
    -    * values.  Ridiculousity will be checked if the values are used later.
    +    * values.  Ridiculosity will be checked if the values are used later.
         */
        png_set_cHRM_fixed(png_ptr, info_ptr, xy.whitex, xy.whitey, xy.redx, xy.redy,
              xy.greenx, xy.greeny, xy.bluex, xy.bluey);
    @@ -1593,7 +1580,8 @@ static png_handle_result_code /* PRIVATE */
     png_handle_sPLT(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
     /* Note: this does not properly handle chunks that are > 64K under DOS */
     {
    -   png_bytep entry_start, buffer;
    +   png_bytep buffer;
    +   png_bytep entry_start;
        png_sPLT_t new_palette;
        png_sPLT_entryp pp;
        png_uint_32 data_length;
    @@ -1800,10 +1788,6 @@ png_handle_tRNS(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
           return handled_error;
        }
     
    -   /* TODO: this is a horrible side effect in the palette case because the
    -    * png_struct ends up with a pointer to the tRNS buffer owned by the
    -    * png_info.  Fix this.
    -    */
        png_set_tRNS(png_ptr, info_ptr, readbuf, png_ptr->num_trans,
            &(png_ptr->trans_color));
        return handled_ok;
    @@ -2062,7 +2046,7 @@ png_handle_eXIf(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
           return handled_error;
     
        /* PNGv3: the code used to check the byte order mark at the start for MM or
    -    * II, however PNGv3 states that the the first 4 bytes should be checked.
    +    * II, however PNGv3 states that the first 4 bytes should be checked.
         * The caller ensures that there are four bytes available.
         */
        {
    @@ -2184,9 +2168,13 @@ png_handle_oFFs(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
     static png_handle_result_code /* PRIVATE */
     png_handle_pCAL(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
     {
    +   png_bytep buffer;
    +   png_bytep buf;
    +   png_bytep endptr;
        png_int_32 X0, X1;
    -   png_byte type, nparams;
    -   png_bytep buffer, buf, units, endptr;
    +   png_byte type;
    +   png_byte nparams;
    +   png_byte *units;
        png_charpp params;
        int i;
     
    @@ -3040,7 +3028,7 @@ static const struct
        png_uint_32 max_length :12; /* Length min, max in bytes */
        png_uint_32 min_length :8;
           /* Length errors on critical chunks have special handling to preserve the
    -       * existing behaviour in libpng 1.6.  Anciallary chunks are checked below
    +       * existing behaviour in libpng 1.6.  Ancillary chunks are checked below
            * and produce a 'benign' error.
            */
        png_uint_32 pos_before :4; /* PNG_HAVE_ values chunk must precede */
    @@ -3048,7 +3036,7 @@ static const struct
           /* NOTE: PLTE, tRNS and bKGD require special handling which depends on
            * the colour type of the base image.
            */
    -   png_uint_32 multiple   :1; /* Multiple occurences permitted */
    +   png_uint_32 multiple   :1; /* Multiple occurrences permitted */
           /* This is enabled for PLTE because PLTE may, in practice, be optional */
     }
     read_chunks[PNG_INDEX_unknown] =
    @@ -3082,7 +3070,7 @@ read_chunks[PNG_INDEX_unknown] =
     #  define CDIHDR      13U,   13U,  hIHDR,     0,        0
     #  define CDPLTE  NoCheck,    0U,      0, hIHDR,        1
           /* PLTE errors are only critical for colour-map images, consequently the
    -       * hander does all the checks.
    +       * handler does all the checks.
            */
     #  define CDIDAT  NoCheck,    0U,  aIDAT, hIHDR,        1
     #  define CDIEND  NoCheck,    0U,      0, aIDAT,        0
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngset.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngset.c
    index 0b2844f1864..29082a6be08 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngset.c
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngset.c
    @@ -29,7 +29,7 @@
      * However, the following notice accompanied the original version of this
      * file and, per its terms, should not be removed:
      *
    - * Copyright (c) 2018-2025 Cosmin Truta
    + * Copyright (c) 2018-2026 Cosmin Truta
      * Copyright (c) 1998-2018 Glenn Randers-Pehrson
      * Copyright (c) 1996-1997 Andreas Dilger
      * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
    @@ -362,7 +362,8 @@ png_set_eXIf_1(png_const_structrp png_ptr, png_inforp info_ptr,
        png_debug1(1, "in %s storage function", "eXIf");
     
        if (png_ptr == NULL || info_ptr == NULL ||
    -       (png_ptr->mode & PNG_WROTE_eXIf) != 0)
    +       (png_ptr->mode & PNG_WROTE_eXIf) != 0 ||
    +       exif == NULL)
           return;
     
        new_exif = png_voidcast(png_bytep, png_malloc_warn(png_ptr, num_exif));
    @@ -413,11 +414,12 @@ void PNGAPI
     png_set_hIST(png_const_structrp png_ptr, png_inforp info_ptr,
         png_const_uint_16p hist)
     {
    +   png_uint_16 safe_hist[PNG_MAX_PALETTE_LENGTH];
        int i;
     
        png_debug1(1, "in %s storage function", "hIST");
     
    -   if (png_ptr == NULL || info_ptr == NULL)
    +   if (png_ptr == NULL || info_ptr == NULL || hist == NULL)
           return;
     
        if (info_ptr->num_palette == 0 || info_ptr->num_palette
    @@ -429,6 +431,13 @@ png_set_hIST(png_const_structrp png_ptr, png_inforp info_ptr,
           return;
        }
     
    +   /* Snapshot the caller's hist before freeing, in case it points to
    +    * info_ptr->hist (getter-to-setter aliasing).
    +    */
    +   memcpy(safe_hist, hist, (unsigned int)info_ptr->num_palette *
    +       (sizeof (png_uint_16)));
    +   hist = safe_hist;
    +
        png_free_data(png_ptr, info_ptr, PNG_FREE_HIST, 0);
     
        /* Changed from info->num_palette to PNG_MAX_PALETTE_LENGTH in
    @@ -770,7 +779,7 @@ void PNGAPI
     png_set_PLTE(png_structrp png_ptr, png_inforp info_ptr,
         png_const_colorp palette, int num_palette)
     {
    -
    +   png_color safe_palette[PNG_MAX_PALETTE_LENGTH];
        png_uint_32 max_palette_length;
     
        png_debug1(1, "in %s storage function", "PLTE");
    @@ -804,28 +813,47 @@ png_set_PLTE(png_structrp png_ptr, png_inforp info_ptr,
           png_error(png_ptr, "Invalid palette");
        }
     
    -   /* It may not actually be necessary to set png_ptr->palette here;
    -    * we do it for backward compatibility with the way the png_handle_tRNS
    -    * function used to do the allocation.
    -    *
    -    * 1.6.0: the above statement appears to be incorrect; something has to set
    -    * the palette inside png_struct on read.
    +   /* Snapshot the caller's palette before freeing, in case it points to
    +    * info_ptr->palette (getter-to-setter aliasing).
         */
    +   if (num_palette > 0)
    +      memcpy(safe_palette, palette, (unsigned int)num_palette *
    +          (sizeof (png_color)));
    +
    +   palette = safe_palette;
    +
        png_free_data(png_ptr, info_ptr, PNG_FREE_PLTE, 0);
     
        /* Changed in libpng-1.2.1 to allocate PNG_MAX_PALETTE_LENGTH instead
         * of num_palette entries, in case of an invalid PNG file or incorrect
         * call to png_set_PLTE() with too-large sample values.
    +    *
    +    * Allocate independent buffers for info_ptr and png_ptr so that the
    +    * lifetime of png_ptr->palette is decoupled from the lifetime of
    +    * info_ptr->palette.  Previously, these two pointers were aliased,
    +    * which caused a use-after-free vulnerability if png_free_data freed
    +    * info_ptr->palette while png_ptr->palette was still in use by the
    +    * row transform functions (e.g. png_do_expand_palette).
    +    *
    +    * Both buffers are allocated with png_calloc to zero-fill, because
    +    * the ARM NEON palette riffle reads all 256 entries unconditionally,
    +    * regardless of num_palette.
         */
    +   png_free(png_ptr, png_ptr->palette);
        png_ptr->palette = png_voidcast(png_colorp, png_calloc(png_ptr,
            PNG_MAX_PALETTE_LENGTH * (sizeof (png_color))));
    +   info_ptr->palette = png_voidcast(png_colorp, png_calloc(png_ptr,
    +       PNG_MAX_PALETTE_LENGTH * (sizeof (png_color))));
    +   png_ptr->num_palette = info_ptr->num_palette = (png_uint_16)num_palette;
     
        if (num_palette > 0)
    +   {
    +      memcpy(info_ptr->palette, palette, (unsigned int)num_palette *
    +          (sizeof (png_color)));
           memcpy(png_ptr->palette, palette, (unsigned int)num_palette *
               (sizeof (png_color)));
    +   }
     
    -   info_ptr->palette = png_ptr->palette;
    -   info_ptr->num_palette = png_ptr->num_palette = (png_uint_16)num_palette;
        info_ptr->free_me |= PNG_FREE_PLTE;
        info_ptr->valid |= PNG_INFO_PLTE;
     }
    @@ -955,6 +983,7 @@ png_set_text_2(png_const_structrp png_ptr, png_inforp info_ptr,
         png_const_textp text_ptr, int num_text)
     {
        int i;
    +   png_textp old_text = NULL;
     
        png_debug1(1, "in text storage function, chunk typeid = 0x%lx",
           png_ptr == NULL ? 0xabadca11UL : (unsigned long)png_ptr->chunk_name);
    @@ -1002,7 +1031,10 @@ png_set_text_2(png_const_structrp png_ptr, png_inforp info_ptr,
              return 1;
           }
     
    -      png_free(png_ptr, info_ptr->text);
    +      /* Defer freeing the old array until after the copy loop below,
    +       * in case text_ptr aliases info_ptr->text (getter-to-setter).
    +       */
    +      old_text = info_ptr->text;
     
           info_ptr->text = new_text;
           info_ptr->free_me |= PNG_FREE_TEXT;
    @@ -1087,6 +1119,7 @@ png_set_text_2(png_const_structrp png_ptr, png_inforp info_ptr,
           {
              png_chunk_report(png_ptr, "text chunk: out of memory",
                  PNG_CHUNK_WRITE_ERROR);
    +         png_free(png_ptr, old_text);
     
              return 1;
           }
    @@ -1140,6 +1173,8 @@ png_set_text_2(png_const_structrp png_ptr, png_inforp info_ptr,
           png_debug1(3, "transferred text chunk %d", info_ptr->num_text);
        }
     
    +   png_free(png_ptr, old_text);
    +
        return 0;
     }
     #endif
    @@ -1183,28 +1218,50 @@ png_set_tRNS(png_structrp png_ptr, png_inforp info_ptr,
     
        if (trans_alpha != NULL)
        {
    -       /* It may not actually be necessary to set png_ptr->trans_alpha here;
    -        * we do it for backward compatibility with the way the png_handle_tRNS
    -        * function used to do the allocation.
    -        *
    -        * 1.6.0: The above statement is incorrect; png_handle_tRNS effectively
    -        * relies on png_set_tRNS storing the information in png_struct
    -        * (otherwise it won't be there for the code in pngrtran.c).
    +       /* Snapshot the caller's trans_alpha before freeing, in case it
    +        * points to info_ptr->trans_alpha (getter-to-setter aliasing).
             */
    +       png_byte safe_trans[PNG_MAX_PALETTE_LENGTH];
    +
    +       if (num_trans > 0 && num_trans <= PNG_MAX_PALETTE_LENGTH)
    +          memcpy(safe_trans, trans_alpha, (size_t)num_trans);
    +
    +       trans_alpha = safe_trans;
     
            png_free_data(png_ptr, info_ptr, PNG_FREE_TRNS, 0);
     
            if (num_trans > 0 && num_trans <= PNG_MAX_PALETTE_LENGTH)
            {
    -         /* Changed from num_trans to PNG_MAX_PALETTE_LENGTH in version 1.2.1 */
    +          /* Allocate info_ptr's copy of the transparency data.
    +           * Initialize all entries to fully opaque (0xff), then overwrite
    +           * the first num_trans entries with the actual values.
    +           */
               info_ptr->trans_alpha = png_voidcast(png_bytep,
                   png_malloc(png_ptr, PNG_MAX_PALETTE_LENGTH));
    +          memset(info_ptr->trans_alpha, 0xff, PNG_MAX_PALETTE_LENGTH);
               memcpy(info_ptr->trans_alpha, trans_alpha, (size_t)num_trans);
    -
               info_ptr->free_me |= PNG_FREE_TRNS;
               info_ptr->valid |= PNG_INFO_tRNS;
    +
    +          /* Allocate an independent copy for png_struct, so that the
    +           * lifetime of png_ptr->trans_alpha is decoupled from the
    +           * lifetime of info_ptr->trans_alpha.  Previously these two
    +           * pointers were aliased, which caused a use-after-free if
    +           * png_free_data freed info_ptr->trans_alpha while
    +           * png_ptr->trans_alpha was still in use by the row transform
    +           * functions (e.g. png_do_expand_palette).
    +           */
    +          png_free(png_ptr, png_ptr->trans_alpha);
    +          png_ptr->trans_alpha = png_voidcast(png_bytep,
    +              png_malloc(png_ptr, PNG_MAX_PALETTE_LENGTH));
    +          memset(png_ptr->trans_alpha, 0xff, PNG_MAX_PALETTE_LENGTH);
    +          memcpy(png_ptr->trans_alpha, trans_alpha, (size_t)num_trans);
    +       }
    +       else
    +       {
    +          png_free(png_ptr, png_ptr->trans_alpha);
    +          png_ptr->trans_alpha = NULL;
            }
    -       png_ptr->trans_alpha = info_ptr->trans_alpha;
        }
     
        if (trans_color != NULL)
    @@ -1255,6 +1312,7 @@ png_set_sPLT(png_const_structrp png_ptr,
      */
     {
        png_sPLT_tp np;
    +   png_sPLT_tp old_spalettes;
     
        png_debug1(1, "in %s storage function", "sPLT");
     
    @@ -1275,7 +1333,10 @@ png_set_sPLT(png_const_structrp png_ptr,
           return;
        }
     
    -   png_free(png_ptr, info_ptr->splt_palettes);
    +   /* Defer freeing the old array until after the copy loop below,
    +    * in case entries aliases info_ptr->splt_palettes (getter-to-setter).
    +    */
    +   old_spalettes = info_ptr->splt_palettes;
     
        info_ptr->splt_palettes = np;
        info_ptr->free_me |= PNG_FREE_SPLT;
    @@ -1339,6 +1400,8 @@ png_set_sPLT(png_const_structrp png_ptr,
        }
        while (--nentries);
     
    +   png_free(png_ptr, old_spalettes);
    +
        if (nentries > 0)
           png_chunk_report(png_ptr, "sPLT out of memory", PNG_CHUNK_WRITE_ERROR);
     }
    @@ -1387,6 +1450,7 @@ png_set_unknown_chunks(png_const_structrp png_ptr,
         png_inforp info_ptr, png_const_unknown_chunkp unknowns, int num_unknowns)
     {
        png_unknown_chunkp np;
    +   png_unknown_chunkp old_unknowns;
     
        if (png_ptr == NULL || info_ptr == NULL || num_unknowns <= 0 ||
            unknowns == NULL)
    @@ -1433,7 +1497,10 @@ png_set_unknown_chunks(png_const_structrp png_ptr,
           return;
        }
     
    -   png_free(png_ptr, info_ptr->unknown_chunks);
    +   /* Defer freeing the old array until after the copy loop below,
    +    * in case unknowns aliases info_ptr->unknown_chunks (getter-to-setter).
    +    */
    +   old_unknowns = info_ptr->unknown_chunks;
     
        info_ptr->unknown_chunks = np; /* safe because it is initialized */
        info_ptr->free_me |= PNG_FREE_UNKN;
    @@ -1479,6 +1546,8 @@ png_set_unknown_chunks(png_const_structrp png_ptr,
           ++np;
           ++(info_ptr->unknown_chunks_num);
        }
    +
    +   png_free(png_ptr, old_unknowns);
     }
     
     void PNGAPI
    @@ -1902,7 +1971,7 @@ png_set_benign_errors(png_structrp png_ptr, int allowed)
     #endif /* BENIGN_ERRORS */
     
     #ifdef PNG_CHECK_FOR_INVALID_INDEX_SUPPORTED
    -   /* Whether to report invalid palette index; added at libng-1.5.10.
    +   /* Whether to report invalid palette index; added at libpng-1.5.10.
         * It is possible for an indexed (color-type==3) PNG file to contain
         * pixels with invalid (out-of-range) indexes if the PLTE chunk has
         * fewer entries than the image's bit-depth would allow. We recover
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h b/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h
    index 8edb4bc393a..f02365e8d8e 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h
    @@ -29,7 +29,7 @@
      * However, the following notice accompanied the original version of this
      * file and, per its terms, should not be removed:
      *
    - * Copyright (c) 2018-2025 Cosmin Truta
    + * Copyright (c) 2018-2026 Cosmin Truta
      * Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
      * Copyright (c) 1996-1997 Andreas Dilger
      * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
    @@ -135,7 +135,7 @@ typedef enum
      * TODO: C23: convert these macros to C23 inlines (which are static).
      */
     #define png_chunk_flag_from_index(i) (0x80000000U >> (31 - (i)))
    -   /* The flag coresponding to the given png_index enum value.  This is defined
    +   /* The flag corresponding to the given png_index enum value.  This is defined
         * for png_unknown as well (until it reaches the value 32) but this should
         * not be relied on.
         */
    @@ -144,7 +144,7 @@ typedef enum
        (((png_ptr)->chunks & png_chunk_flag_from_index(i)) != 0)
        /* The chunk has been recorded in png_struct */
     
    -#define png_file_add_chunk(pnt_ptr, i)\
    +#define png_file_add_chunk(png_ptr, i)\
        ((void)((png_ptr)->chunks |= png_chunk_flag_from_index(i)))
        /* Record the chunk in the png_struct */
     
    diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngtrans.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngtrans.c
    index b9f6cb5d437..86ff2812e23 100644
    --- a/src/java.desktop/share/native/libsplashscreen/libpng/pngtrans.c
    +++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngtrans.c
    @@ -29,7 +29,7 @@
      * However, the following notice accompanied the original version of this
      * file and, per its terms, should not be removed:
      *
    - * Copyright (c) 2018-2024 Cosmin Truta
    + * Copyright (c) 2018-2026 Cosmin Truta
      * Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
      * Copyright (c) 1996-1997 Andreas Dilger
      * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
    @@ -113,9 +113,38 @@ png_set_shift(png_structrp png_ptr, png_const_color_8p true_bits)
     {
        png_debug(1, "in png_set_shift");
     
    -   if (png_ptr == NULL)
    +   if (png_ptr == NULL || true_bits == NULL)
           return;
     
    +   /* Check the shift values before passing them on to png_do_shift. */
    +   {
    +      png_byte bit_depth = png_ptr->bit_depth;
    +      int invalid = 0;
    +
    +      if ((png_ptr->color_type & PNG_COLOR_MASK_COLOR) != 0)
    +      {
    +         if (true_bits->red == 0 || true_bits->red > bit_depth ||
    +             true_bits->green == 0 || true_bits->green > bit_depth ||
    +             true_bits->blue == 0 || true_bits->blue > bit_depth)
    +            invalid = 1;
    +      }
    +      else
    +      {
    +         if (true_bits->gray == 0 || true_bits->gray > bit_depth)
    +            invalid = 1;
    +      }
    +
    +      if ((png_ptr->color_type & PNG_COLOR_MASK_ALPHA) != 0 &&
    +          (true_bits->alpha == 0 || true_bits->alpha > bit_depth))
    +         invalid = 1;
    +
    +      if (invalid)
    +      {
    +         png_app_error(png_ptr, "png_set_shift: invalid shift values");
    +         return;
    +      }
    +   }
    +
        png_ptr->transformations |= PNG_SHIFT;
        png_ptr->shift = *true_bits;
     }
    @@ -486,10 +515,9 @@ png_do_packswap(png_row_infop row_info, png_bytep row)
     
        if (row_info->bit_depth < 8)
        {
    +      png_const_bytep table;
           png_bytep rp;
    -      png_const_bytep end, table;
    -
    -      end = row + row_info->rowbytes;
    +      png_bytep row_end = row + row_info->rowbytes;
     
           if (row_info->bit_depth == 1)
              table = onebppswaptable;
    @@ -503,7 +531,7 @@ png_do_packswap(png_row_infop row_info, png_bytep row)
           else
              return;
     
    -      for (rp = row; rp < end; rp++)
    +      for (rp = row; rp < row_end; rp++)
              *rp = table[*rp];
        }
     }
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11/InfoWindow.java b/src/java.desktop/unix/classes/sun/awt/X11/InfoWindow.java
    index bab0f34f90f..c03eae62225 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11/InfoWindow.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11/InfoWindow.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2009, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2009, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -31,6 +31,7 @@ import java.awt.Color;
     import java.awt.Component;
     import java.awt.Container;
     import java.awt.Dimension;
    +import java.awt.EventQueue;
     import java.awt.Font;
     import java.awt.Frame;
     import java.awt.GridLayout;
    @@ -91,7 +92,7 @@ public abstract class InfoWindow extends Window {
         // Must be executed on EDT.
         @SuppressWarnings("deprecation")
         protected void show(Point corner, int indent) {
    -        assert SunToolkit.isDispatchThreadForAppContext(this);
    +        assert EventQueue.isDispatchThread();
     
             pack();
     
    @@ -464,7 +465,7 @@ public abstract class InfoWindow extends Window {
                         ActionEvent aev = new ActionEvent(target, ActionEvent.ACTION_PERFORMED,
                                                           liveArguments.getActionCommand(),
                                                           e.getWhen(), e.getModifiers());
    -                    XToolkit.postEvent(XToolkit.targetToAppContext(aev.getSource()), aev);
    +                    XToolkit.postEvent(aev);
                     }
                 }
             }
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11/XBaseMenuWindow.java b/src/java.desktop/unix/classes/sun/awt/X11/XBaseMenuWindow.java
    index f8fcb30d8d8..9512d1d0351 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11/XBaseMenuWindow.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11/XBaseMenuWindow.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -114,8 +114,6 @@ public abstract class XBaseMenuWindow extends XWindow {
         protected Point grabInputPoint = null;
         protected boolean hasPointerMoved = false;
     
    -    private AppContext disposeAppContext;
    -
         /************************************************
          *
          * Mapping data
    @@ -175,8 +173,6 @@ public abstract class XBaseMenuWindow extends XWindow {
         XBaseMenuWindow() {
             super(new XCreateWindowParams(new Object[] {
                 DELAYED, Boolean.TRUE}));
    -
    -        disposeAppContext = AppContext.getAppContext();
         }
     
         /************************************************
    @@ -920,7 +916,7 @@ public abstract class XBaseMenuWindow extends XWindow {
         public void dispose() {
             setDisposed(true);
     
    -        SunToolkit.invokeLaterOnAppContext(disposeAppContext, new Runnable()  {
    +        SunToolkit.invokeLater(new Runnable()  {
                 public void run() {
                     doDispose();
                 }
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11/XEmbedChildProxyPeer.java b/src/java.desktop/unix/classes/sun/awt/X11/XEmbedChildProxyPeer.java
    index efae47d6f23..03593761111 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11/XEmbedChildProxyPeer.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11/XEmbedChildProxyPeer.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -201,7 +201,7 @@ public final class XEmbedChildProxyPeer implements ComponentPeer, XEventDispatch
         public void                 updateCursorImmediately() {}
     
         void postEvent(AWTEvent event) {
    -        XToolkit.postEvent(XToolkit.targetToAppContext(proxy), event);
    +        XToolkit.postEvent(event);
         }
     
         boolean simulateMotifRequestFocus(Component lightweightChild, boolean temporary,
    @@ -323,9 +323,9 @@ public final class XEmbedChildProxyPeer implements ComponentPeer, XEventDispatch
         }
     
         void childResized() {
    -        XToolkit.postEvent(XToolkit.targetToAppContext(proxy), new ComponentEvent(proxy, ComponentEvent.COMPONENT_RESIZED));
    +        XToolkit.postEvent(new ComponentEvent(proxy, ComponentEvent.COMPONENT_RESIZED));
             container.childResized(proxy);
    -//         XToolkit.postEvent(XToolkit.targetToAppContext(proxy), new InvocationEvent(proxy, new Runnable() {
    +//         XToolkit.postEvent(new InvocationEvent(proxy, new Runnable() {
     //                 public void run() {
     //                     getTopLevel(proxy).invalidate();
     //                     getTopLevel(proxy).pack();
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11/XTaskbarPeer.java b/src/java.desktop/unix/classes/sun/awt/X11/XTaskbarPeer.java
    index 7f0629e101e..cb80f0abd0e 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11/XTaskbarPeer.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11/XTaskbarPeer.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -152,7 +152,7 @@ final class XTaskbarPeer implements TaskbarPeer {
                         mi.getActionCommand());
                 try {
                     XToolkit.awtLock();
    -                XToolkit.postEvent(XToolkit.targetToAppContext(ae.getSource()), ae);
    +                XToolkit.postEvent(ae);
                 } finally {
                     XToolkit.awtUnlock();
                 }
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11/XToolkit.java b/src/java.desktop/unix/classes/sun/awt/X11/XToolkit.java
    index 78cd4a7e57d..1ec0039febd 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11/XToolkit.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11/XToolkit.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -625,14 +625,8 @@ public final class XToolkit extends UNIXToolkit implements Runnable {
             while(true) {
                 // Fix for 6829923: we should gracefully handle toolkit thread interruption
                 if (Thread.currentThread().isInterrupted()) {
    -                // We expect interruption from the AppContext.dispose() method only.
                     // If the thread is interrupted from another place, let's skip it
    -                // for compatibility reasons. Probably some time later we'll remove
    -                // the check for AppContext.isDisposed() and will unconditionally
    -                // break the loop here.
    -                if (AppContext.getAppContext().isDisposed()) {
    -                    break;
    -                }
    +                // for compatibility reasons.
                 }
                 awtLock();
                 try {
    @@ -1770,7 +1764,11 @@ public final class XToolkit extends UNIXToolkit implements Runnable {
             final int altL = keysymToPrimaryKeycode(XKeySymConstants.XK_Alt_L);
             final int altR = keysymToPrimaryKeycode(XKeySymConstants.XK_Alt_R);
             final int numLock = keysymToPrimaryKeycode(XKeySymConstants.XK_Num_Lock);
    -        final int modeSwitch = keysymToPrimaryKeycode(XKeySymConstants.XK_Mode_switch);
    +        int modeSwitchTmp = keysymToPrimaryKeycode(XKeySymConstants.XK_Mode_switch);
    +        if (modeSwitchTmp == 0) {
    +            modeSwitchTmp = keysymToPrimaryKeycode(XKeySymConstants.XK_ISO_Level3_Shift);
    +        }
    +        final int modeSwitch = modeSwitchTmp;
             final int shiftLock = keysymToPrimaryKeycode(XKeySymConstants.XK_Shift_Lock);
             final int capsLock  = keysymToPrimaryKeycode(XKeySymConstants.XK_Caps_Lock);
     
    @@ -2050,14 +2048,6 @@ public final class XToolkit extends UNIXToolkit implements Runnable {
                    (exclusionType == Dialog.ModalExclusionType.TOOLKIT_EXCLUDE);
         }
     
    -    static EventQueue getEventQueue(Object target) {
    -        AppContext appContext = targetToAppContext(target);
    -        if (appContext != null) {
    -            return (EventQueue)appContext.get(AppContext.EVENT_QUEUE_KEY);
    -        }
    -        return null;
    -    }
    -
         static void removeSourceEvents(EventQueue queue,
                                        Object source,
                                        boolean removeAllEvents) {
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11/XTrayIconPeer.java b/src/java.desktop/unix/classes/sun/awt/X11/XTrayIconPeer.java
    index 9f0ac241f5b..5ab97125991 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11/XTrayIconPeer.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11/XTrayIconPeer.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -269,7 +269,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
     
         @Override
         public void dispose() {
    -        if (SunToolkit.isDispatchThreadForAppContext(target)) {
    +        if (EventQueue.isDispatchThread()) {
                 disposeOnEDT();
             } else {
                 try {
    @@ -329,7 +329,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
                     }
                 };
     
    -        if (!SunToolkit.isDispatchThreadForAppContext(target)) {
    +        if (!EventQueue.isDispatchThread()) {
                 SunToolkit.executeOnEventHandlerThread(target, r);
             } else {
                 r.run();
    @@ -355,7 +355,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
             if (isDisposed())
                 return;
     
    -        assert SunToolkit.isDispatchThreadForAppContext(target);
    +        assert EventQueue.isDispatchThread();
     
             PopupMenu newPopup = target.getPopupMenu();
             if (popup != newPopup) {
    @@ -476,7 +476,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
                 // other class tries to cast source field to Component).
                 // We already filter DRAG events out (CR 6565779).
                 e.setSource(xtiPeer.target);
    -            XToolkit.postEvent(XToolkit.targetToAppContext(e.getSource()), e);
    +            XToolkit.postEvent(e);
             }
             @Override
             @SuppressWarnings("deprecation")
    @@ -487,7 +487,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
                     ActionEvent aev = new ActionEvent(xtiPeer.target, ActionEvent.ACTION_PERFORMED,
                                                       xtiPeer.target.getActionCommand(), e.getWhen(),
                                                       e.getModifiers());
    -                XToolkit.postEvent(XToolkit.targetToAppContext(aev.getSource()), aev);
    +                XToolkit.postEvent(aev);
                 }
                 if (xtiPeer.balloon.isVisible()) {
                     xtiPeer.balloon.hide();
    diff --git a/src/java.desktop/unix/classes/sun/awt/X11InputMethodBase.java b/src/java.desktop/unix/classes/sun/awt/X11InputMethodBase.java
    index d9eaa9629d1..3aa1624a1dd 100644
    --- a/src/java.desktop/unix/classes/sun/awt/X11InputMethodBase.java
    +++ b/src/java.desktop/unix/classes/sun/awt/X11InputMethodBase.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -434,7 +434,7 @@ public abstract class X11InputMethodBase extends InputMethodAdapter {
             if (source != null) {
                 InputMethodEvent event = new InputMethodEvent(source,
                     id, when, text, committedCharacterCount, caret, visiblePosition);
    -            SunToolkit.postEvent(SunToolkit.targetToAppContext(source), (AWTEvent)event);
    +            SunToolkit.postEvent((AWTEvent)event);
             }
         }
     
    diff --git a/src/java.desktop/unix/native/common/awt/awt_Component.h b/src/java.desktop/unix/native/common/awt/awt_Component.h
    index 089691a8a4d..7ef0a0de94b 100644
    --- a/src/java.desktop/unix/native/common/awt/awt_Component.h
    +++ b/src/java.desktop/unix/native/common/awt/awt_Component.h
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -38,7 +38,6 @@ struct ComponentIDs {
         jfieldID graphicsConfig;
         jfieldID name;
         jfieldID isProxyActive;
    -    jfieldID appContext;
         jmethodID getParent;
         jmethodID getLocationOnScreen;
     };
    diff --git a/src/java.desktop/unix/native/common/java2d/opengl/GLXSurfaceData.c b/src/java.desktop/unix/native/common/java2d/opengl/GLXSurfaceData.c
    index 8f264278d9f..0497dbf69fa 100644
    --- a/src/java.desktop/unix/native/common/java2d/opengl/GLXSurfaceData.c
    +++ b/src/java.desktop/unix/native/common/java2d/opengl/GLXSurfaceData.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -40,6 +40,8 @@
     
     #ifndef HEADLESS
     
    +#include 
    +
     extern LockFunc       OGLSD_Lock;
     extern GetRasInfoFunc OGLSD_GetRasInfo;
     extern UnlockFunc     OGLSD_Unlock;
    @@ -50,6 +52,74 @@ extern void
     
     jboolean surfaceCreationFailed = JNI_FALSE;
     
    +/**
    + * Per-Window GLXWindow entry with reference counting.
    + * Stored in an XContext keyed by the X Window XID.
    + */
    +typedef struct {
    +    GLXWindow glxWindow;
    +    int       refCount;
    +} GLXWindowRef;
    +
    +static XContext glxWindowContext;
    +
    +/**
    + * Gets or creates a shared GLXWindow for the given X Window.
    + * All callers are synchronized by the AWT lock.
    + */
    +static GLXWindow acquireGLXWindow(Window window, GLXFBConfig fbconfig)
    +{
    +    if (glxWindowContext == 0) {
    +        glxWindowContext = XUniqueContext();
    +    }
    +
    +    XPointer data;
    +    if (XFindContext(awt_display, window, glxWindowContext, &data) == 0) {
    +        GLXWindowRef *ref = (GLXWindowRef *)data;
    +        ref->refCount++;
    +        return ref->glxWindow;
    +    }
    +
    +    GLXWindow glxWin = j2d_glXCreateWindow(awt_display, fbconfig, window, NULL);
    +    if (glxWin == 0) {
    +        return 0;
    +    }
    +
    +    GLXWindowRef *ref = malloc(sizeof(*ref));
    +    if (ref == NULL) {
    +        j2d_glXDestroyWindow(awt_display, glxWin);
    +        return 0;
    +    }
    +    ref->glxWindow = glxWin;
    +    ref->refCount = 1;
    +    if (XSaveContext(awt_display, window, glxWindowContext, (XPointer)ref) != 0)
    +    {
    +        j2d_glXDestroyWindow(awt_display, glxWin);
    +        free(ref);
    +        return 0;
    +    }
    +    return glxWin;
    +}
    +
    +/**
    + * Decrements the reference count for the GLXWindow associated with the given
    + * X Window. Destroys it when the count reaches zero.
    + * All callers are synchronized by the AWT lock.
    + */
    +static void releaseGLXWindow(Window window)
    +{
    +    XPointer data;
    +    if (XFindContext(awt_display, window, glxWindowContext, &data) != 0) {
    +        return;
    +    }
    +    GLXWindowRef *ref = (GLXWindowRef *)data;
    +    if (--ref->refCount <= 0) {
    +        j2d_glXDestroyWindow(awt_display, ref->glxWindow);
    +        XDeleteContext(awt_display, window, glxWindowContext);
    +        free(ref);
    +    }
    +}
    +
     #endif /* !HEADLESS */
     
     JNIEXPORT void JNICALL
    @@ -74,7 +144,7 @@ Java_sun_java2d_opengl_GLXSurfaceData_initOps(JNIEnv *env, jobject glxsd,
         // later the graphicsConfig will be used for deallocation of oglsdo
         oglsdo->graphicsConfig = gc;
     
    -    GLXSDOps *glxsdo = (GLXSDOps *)malloc(sizeof(GLXSDOps));
    +    GLXSDOps *glxsdo = (GLXSDOps *)calloc(1, sizeof(GLXSDOps));
     
         if (glxsdo == NULL) {
             JNU_ThrowOutOfMemoryError(env, "creating native GLX ops");
    @@ -125,8 +195,13 @@ Java_sun_java2d_opengl_GLXSurfaceData_initOps(JNIEnv *env, jobject glxsd,
     void
     OGLSD_DestroyOGLSurface(JNIEnv *env, OGLSDOps *oglsdo)
     {
    +    GLXSDOps *glxsdo = (GLXSDOps *)oglsdo->privOps;
         J2dTraceLn(J2D_TRACE_INFO, "OGLSD_DestroyOGLSurface");
    -    // X Window is free'd later by AWT code...
    +    if (glxsdo != NULL && glxsdo->drawable != 0) {
    +        releaseGLXWindow(glxsdo->window);
    +        glxsdo->drawable = 0;
    +        oglsdo->drawableType = OGLSD_UNDEFINED;
    +    }
     }
     
     /**
    @@ -296,6 +371,13 @@ OGLSD_InitOGLWindow(JNIEnv *env, OGLSDOps *oglsdo)
             return JNI_FALSE;
         }
     
    +    glxsdo->drawable = acquireGLXWindow(window,
    +                                        glxsdo->configData->glxInfo->fbconfig);
    +    if (glxsdo->drawable == 0) {
    +        J2dRlsTraceLn(J2D_TRACE_ERROR, "OGLSD_InitOGLWindow: GLXWindow is 0");
    +        return JNI_FALSE;
    +    }
    +
         XGetWindowAttributes(awt_display, window, &attr);
         oglsdo->width = attr.width;
         oglsdo->height = attr.height;
    @@ -304,7 +386,6 @@ OGLSD_InitOGLWindow(JNIEnv *env, OGLSDOps *oglsdo)
         oglsdo->isOpaque = JNI_TRUE;
         oglsdo->xOffset = 0;
         oglsdo->yOffset = 0;
    -    glxsdo->drawable = window;
         glxsdo->xdrawable = window;
     
         J2dTraceLn(J2D_TRACE_VERBOSE, "  created window: w=%d h=%d",
    @@ -333,7 +414,16 @@ OGLSD_SwapBuffers(JNIEnv *env, jlong window)
             return;
         }
     
    -    j2d_glXSwapBuffers(awt_display, (Window)window);
    +    XPointer data;
    +    if (XFindContext(awt_display, (Window)window, glxWindowContext, &data) != 0)
    +    {
    +        J2dRlsTraceLn(J2D_TRACE_ERROR,
    +                      "OGLSD_SwapBuffers: GLXWindow not found");
    +        return;
    +    }
    +
    +    GLXWindowRef *ref = (GLXWindowRef *)data;
    +    j2d_glXSwapBuffers(awt_display, ref->glxWindow);
     }
     
     // needed by Mac OS X port, no-op on other platforms
    diff --git a/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c b/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c
    index cd07c347c9e..af50fdbb5c0 100644
    --- a/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c
    +++ b/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -36,7 +36,7 @@
     #include 
     
     #ifdef AIX
    -#include "porting_aix.h" /* For the 'dladdr' function. */
    +#define dladdr JVM_dladdr
     #endif
     
     #ifdef DEBUG
    diff --git a/src/java.desktop/unix/native/libawt_xawt/awt/swing_GTKEngine.c b/src/java.desktop/unix/native/libawt_xawt/awt/swing_GTKEngine.c
    index 3b7b2880316..1c29e5168ba 100644
    --- a/src/java.desktop/unix/native/libawt_xawt/awt/swing_GTKEngine.c
    +++ b/src/java.desktop/unix/native/libawt_xawt/awt/swing_GTKEngine.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -400,5 +400,5 @@ Java_com_sun_java_swing_plaf_gtk_GTKLookAndFeel_applyThemeIfNeeded(JNIEnv *env,
         const gboolean result = gtk->apply_theme_if_needed();
         gtk->gdk_threads_leave();
     
    -    return result;
    +    return result ? JNI_TRUE : JNI_FALSE;
     }
    diff --git a/src/java.desktop/unix/native/libawt_xawt/xawt/XToolkit.c b/src/java.desktop/unix/native/libawt_xawt/xawt/XToolkit.c
    index d5f3130386d..c84aa4ff8f9 100644
    --- a/src/java.desktop/unix/native/libawt_xawt/xawt/XToolkit.c
    +++ b/src/java.desktop/unix/native/libawt_xawt/xawt/XToolkit.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -195,10 +195,6 @@ Java_java_awt_Component_initIDs
                                "Z");
         CHECK_NULL(componentIDs.isProxyActive);
     
    -    componentIDs.appContext =
    -        (*env)->GetFieldID(env, cls, "appContext",
    -                           "Lsun/awt/AppContext;");
    -
         (*env)->DeleteLocalRef(env, keyclass);
     }
     
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsBorders.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsBorders.java
    index 81766db8116..572e9e8c117 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsBorders.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsBorders.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -115,7 +115,7 @@ public final class WindowsBorders {
         }
     
         @SuppressWarnings("serial") // Superclass is not serializable across versions
    -    public static final class ProgressBarBorder extends AbstractBorder implements UIResource {
    +    public static class ProgressBarBorder extends AbstractBorder implements UIResource {
             protected Color shadow;
             protected Color highlight;
     
    @@ -148,7 +148,7 @@ public final class WindowsBorders {
          * @since 1.4
          */
         @SuppressWarnings("serial") // Superclass is not serializable across versions
    -    public static final class ToolBarBorder extends AbstractBorder implements UIResource, SwingConstants {
    +    public static class ToolBarBorder extends AbstractBorder implements UIResource, SwingConstants {
             protected Color shadow;
             protected Color highlight;
     
    @@ -308,7 +308,7 @@ public final class WindowsBorders {
          * @since 1.4
          */
         @SuppressWarnings("serial") // Superclass is not serializable across versions
    -    public static final class InternalFrameLineBorder extends LineBorder implements
    +    public static class InternalFrameLineBorder extends LineBorder implements
                 UIResource {
             protected Color activeColor;
             protected Color inactiveColor;
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsButtonUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsButtonUI.java
    index ee07276aa30..63e99e0804a 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsButtonUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsButtonUI.java
    @@ -56,7 +56,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin;
      *
      * @author Jeff Dinkins
      */
    -public final class WindowsButtonUI extends BasicButtonUI
    +public class WindowsButtonUI extends BasicButtonUI
     {
         protected int dashedRectGapX;
         protected int dashedRectGapY;
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java
    index 3a2578b3e0b..47e311486ba 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -42,7 +42,7 @@ import com.sun.java.swing.plaf.windows.TMSchema.State;
     /**
      * Windows check box menu item.
      */
    -public final class WindowsCheckBoxMenuItemUI extends BasicCheckBoxMenuItemUI {
    +public class WindowsCheckBoxMenuItemUI extends BasicCheckBoxMenuItemUI {
     
         final WindowsMenuItemUIAccessor accessor =
             new WindowsMenuItemUIAccessor() {
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxUI.java
    index 7cb2490fd76..d264393f4d8 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxUI.java
    @@ -35,7 +35,7 @@ import javax.swing.plaf.ComponentUI;
      *
      * @author Jeff Dinkins
      */
    -public final class WindowsCheckBoxUI extends WindowsRadioButtonUI
    +public class WindowsCheckBoxUI extends WindowsRadioButtonUI
     {
         // NOTE: WindowsCheckBoxUI inherits from WindowsRadioButtonUI instead
         // of BasicCheckBoxUI because we want to pick up all the
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsClassicLookAndFeel.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsClassicLookAndFeel.java
    index 5716a875cd7..802b5f66888 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsClassicLookAndFeel.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsClassicLookAndFeel.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -31,7 +31,7 @@ package com.sun.java.swing.plaf.windows;
      * @since 1.5
      */
     @SuppressWarnings("serial") // Superclass is not serializable across versions
    -public final class WindowsClassicLookAndFeel extends WindowsLookAndFeel {
    +public class WindowsClassicLookAndFeel extends WindowsLookAndFeel {
         @Override
         public String getName() {
             return "Windows Classic";
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsComboBoxUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsComboBoxUI.java
    index fdc9b03ae7d..8717fd715ea 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsComboBoxUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsComboBoxUI.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -75,7 +75,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin;
      * @author Tom Santos
      * @author Igor Kushnirskiy
      */
    -public final class WindowsComboBoxUI extends BasicComboBoxUI {
    +public class WindowsComboBoxUI extends BasicComboBoxUI {
     
         private static final MouseListener rolloverListener =
             new MouseAdapter() {
    @@ -532,7 +532,7 @@ public final class WindowsComboBoxUI extends BasicComboBoxUI {
         }
     
         @SuppressWarnings("serial") // Same-version serialization only
    -    protected final class WinComboPopUp extends BasicComboPopup {
    +    protected class WinComboPopUp extends BasicComboPopup {
             private Skin listBoxBorder = null;
             private XPStyle xp;
     
    @@ -550,7 +550,7 @@ public final class WindowsComboBoxUI extends BasicComboBoxUI {
                 return new InvocationKeyHandler();
             }
     
    -        protected final class InvocationKeyHandler extends BasicComboPopup.InvocationKeyHandler {
    +        protected class InvocationKeyHandler extends BasicComboPopup.InvocationKeyHandler {
                 protected InvocationKeyHandler() {
                     WinComboPopUp.this.super();
                 }
    @@ -570,7 +570,7 @@ public final class WindowsComboBoxUI extends BasicComboBoxUI {
         /**
          * Subclassed to highlight selected item in an editable combo box.
          */
    -    public static final class WindowsComboBoxEditor
    +    public static class WindowsComboBoxEditor
             extends BasicComboBoxEditor.UIResource {
     
             /**
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopIconUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopIconUI.java
    index 47ecdf5747b..2cebb050396 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopIconUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopIconUI.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -36,7 +36,7 @@ import javax.swing.plaf.basic.BasicDesktopIconUI;
     /**
      * Windows icon for a minimized window on the desktop.
      */
    -public final class WindowsDesktopIconUI extends BasicDesktopIconUI {
    +public class WindowsDesktopIconUI extends BasicDesktopIconUI {
         private int width;
     
         public static ComponentUI createUI(JComponent c) {
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopManager.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopManager.java
    index 79d81bad089..ae081a7690c 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopManager.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopManager.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -52,7 +52,7 @@ import java.lang.ref.WeakReference;
      * @author Thomas Ball
      */
     @SuppressWarnings("serial") // JDK-implementation class
    -public final class WindowsDesktopManager extends DefaultDesktopManager
    +public class WindowsDesktopManager extends DefaultDesktopManager
             implements java.io.Serializable, javax.swing.plaf.UIResource {
     
         /* The frame which is currently selected/activated.
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopPaneUI.java
    index dabbe3fb992..4a3f0ec38b1 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopPaneUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsDesktopPaneUI.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -34,7 +34,7 @@ import javax.swing.plaf.basic.BasicDesktopPaneUI;
      *
      * @author David Kloba
      */
    -public final class WindowsDesktopPaneUI extends BasicDesktopPaneUI
    +public class WindowsDesktopPaneUI extends BasicDesktopPaneUI
     {
         public static ComponentUI createUI(JComponent c) {
             return new WindowsDesktopPaneUI();
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsEditorPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsEditorPaneUI.java
    index 44cb0e9634c..ea21b41c619 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsEditorPaneUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsEditorPaneUI.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -33,7 +33,7 @@ import javax.swing.text.Caret;
     /**
      * Windows rendition of the component.
      */
    -public final class WindowsEditorPaneUI extends BasicEditorPaneUI
    +public class WindowsEditorPaneUI extends BasicEditorPaneUI
     {
     
         /**
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsFileChooserUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsFileChooserUI.java
    index 08c01760be9..86c40ea70d6 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsFileChooserUI.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsFileChooserUI.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -101,7 +101,7 @@ import sun.swing.WindowsPlacesBar;
      *
      * @author Jeff Dinkins
      */
    -public final class WindowsFileChooserUI extends BasicFileChooserUI {
    +public class WindowsFileChooserUI extends BasicFileChooserUI {
     
         // The following are private because the implementation of the
         // Windows FileChooser L&F is not complete yet.
    @@ -1122,7 +1122,7 @@ public final class WindowsFileChooserUI extends BasicFileChooserUI {
          * Data model for a type-face selection combo-box.
          */
         @SuppressWarnings("serial") // Superclass is not serializable across versions
    -    protected final class DirectoryComboBoxModel extends AbstractListModel implements ComboBoxModel {
    +    protected class DirectoryComboBoxModel extends AbstractListModel implements ComboBoxModel {
             Vector directories = new Vector();
             int[] depths = null;
             File selectedDirectory = null;
    @@ -1252,7 +1252,7 @@ public final class WindowsFileChooserUI extends BasicFileChooserUI {
          * Render different type sizes and styles.
          */
         @SuppressWarnings("serial") // Superclass is not serializable across versions
    -    public final class FilterComboBoxRenderer extends DefaultListCellRenderer {
    +    public class FilterComboBoxRenderer extends DefaultListCellRenderer {
             @Override
             public Component getListCellRendererComponent(JList list,
                 Object value, int index, boolean isSelected,
    @@ -1279,7 +1279,7 @@ public final class WindowsFileChooserUI extends BasicFileChooserUI {
          * Data model for a type-face selection combo-box.
          */
         @SuppressWarnings("serial") // Superclass is not serializable across versions
    -    protected final class FilterComboBoxModel extends AbstractListModel implements ComboBoxModel,
    +    protected class FilterComboBoxModel extends AbstractListModel implements ComboBoxModel,
                 PropertyChangeListener {
             protected FileFilter[] filters;
             protected FilterComboBoxModel() {
    @@ -1362,7 +1362,7 @@ public final class WindowsFileChooserUI extends BasicFileChooserUI {
         /**
          * Acts when DirectoryComboBox has changed the selected item.
          */
    -    protected final class DirectoryComboBoxAction implements ActionListener {
    +    protected class DirectoryComboBoxAction implements ActionListener {
     
     
     
    @@ -1387,7 +1387,7 @@ public final class WindowsFileChooserUI extends BasicFileChooserUI {
         // ***********************
         // * FileView operations *
         // ***********************
    -    protected final class WindowsFileView extends BasicFileView {
    +    protected class WindowsFileView extends BasicFileView {
             /* FileView type descriptions */
     
             @Override
    diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameTitlePane.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameTitlePane.java
    index ba4bde12122..029e139fe8f 100644
    --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameTitlePane.java
    +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameTitlePane.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
    + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
      * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      *
      * This code is free software; you can redistribute it and/or modify it
    @@ -418,7 +418,7 @@ public class WindowsInternalFrameTitlePane extends BasicInternalFrameTitlePane {
             return new WindowsTitlePaneLayout();
         }
     
    -    public final class WindowsTitlePaneLayout extends BasicInternalFrameTitlePane.TitlePaneLayout {
    +    public class WindowsTitlePaneLayout extends BasicInternalFrameTitlePane.TitlePaneLayout {
             private Insets captionMargin = null;
             private Insets contentMargin = null;
             private XPStyle xp = XPStyle.getXP();
    @@ -506,7 +506,7 @@ public class WindowsInternalFrameTitlePane extends BasicInternalFrameTitlePane {
             }
         } // end WindowsTitlePaneLayout
     
    -    public final class WindowsPropertyChangeHandler extends PropertyChangeHandler {
    +    public class WindowsPropertyChangeHandler extends PropertyChangeHandler {
             @Override
             public void propertyChange(PropertyChangeEvent evt) {
                 String prop = evt.getPropertyName();
    @@ -530,7 +530,7 @@ public class WindowsInternalFrameTitlePane extends BasicInternalFrameTitlePane {
          * 

    * Note: We assume here that icons are square. */ - public static final class ScalableIconUIResource implements Icon, UIResource { + public static class ScalableIconUIResource implements Icon, UIResource { // We can use an arbitrary size here because we scale to it in paintIcon() private static final int SIZE = 16; diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameUI.java index 9db31ba38f9..6e76ac6a5b4 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsInternalFrameUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin; /** * Windows rendition of the component. */ -public final class WindowsInternalFrameUI extends BasicInternalFrameUI +public class WindowsInternalFrameUI extends BasicInternalFrameUI { XPStyle xp = XPStyle.getXP(); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsLabelUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsLabelUI.java index 4283f743b97..c910b635491 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsLabelUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsLabelUI.java @@ -40,7 +40,7 @@ import sun.swing.SwingUtilities2; /** * Windows rendition of the component. 
*/ -public final class WindowsLabelUI extends BasicLabelUI { +public class WindowsLabelUI extends BasicLabelUI { private static final ComponentUI UI = new WindowsLabelUI(); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuBarUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuBarUI.java index 3d3cf5feee7..ac26dcbf425 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuBarUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ import sun.swing.MnemonicHandler; /** * Windows rendition of the component. */ -public final class WindowsMenuBarUI extends BasicMenuBarUI +public class WindowsMenuBarUI extends BasicMenuBarUI { /* to be accessed on the EDT only */ private WindowListener windowListener = null; diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java index d15bc93a628..d50540588fb 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java @@ -58,7 +58,7 @@ import sun.swing.SwingUtilities2; * * @author Igor Kushnirskiy */ -public final class WindowsMenuItemUI extends BasicMenuItemUI { +public class WindowsMenuItemUI extends BasicMenuItemUI { /** * The instance of {@code PropertyChangeListener}. 
*/ diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java index a7aca0c5ccf..78028db7c00 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,9 +50,9 @@ import com.sun.java.swing.plaf.windows.TMSchema.State; /** * Windows rendition of the component. */ -public final class WindowsMenuUI extends BasicMenuUI { - private Integer menuBarHeight; - private boolean hotTrackingOn; +public class WindowsMenuUI extends BasicMenuUI { + protected Integer menuBarHeight; + protected boolean hotTrackingOn; final WindowsMenuItemUIAccessor accessor = new WindowsMenuItemUIAccessor() { @@ -283,7 +283,7 @@ public final class WindowsMenuUI extends BasicMenuUI { * true when the mouse enters the menu and false when it exits. 
* @since 1.4 */ - protected final class WindowsMouseInputHandler extends BasicMenuUI.MouseInputHandler { + protected class WindowsMouseInputHandler extends BasicMenuUI.MouseInputHandler { @Override public void mouseEntered(MouseEvent evt) { super.mouseEntered(evt); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsOptionPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsOptionPaneUI.java index 05c1b177705..3bed1856a55 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsOptionPaneUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsOptionPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,5 +30,5 @@ import javax.swing.plaf.basic.BasicOptionPaneUI; /** * Windows rendition of the component. */ -public final class WindowsOptionPaneUI extends BasicOptionPaneUI { +public class WindowsOptionPaneUI extends BasicOptionPaneUI { } diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPasswordFieldUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPasswordFieldUI.java index 6adf6e402ec..0c30b291648 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPasswordFieldUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPasswordFieldUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import javax.swing.text.Caret; /** * Windows rendition of the component. */ -public final class WindowsPasswordFieldUI extends BasicPasswordFieldUI { +public class WindowsPasswordFieldUI extends BasicPasswordFieldUI { /** * Creates a UI for a JPasswordField diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuSeparatorUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuSeparatorUI.java index 576549ae482..f236c6b14fc 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuSeparatorUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuSeparatorUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ import com.sun.java.swing.plaf.windows.XPStyle.Skin; * @author Igor Kushnirskiy */ -public final class WindowsPopupMenuSeparatorUI extends BasicPopupMenuSeparatorUI { +public class WindowsPopupMenuSeparatorUI extends BasicPopupMenuSeparatorUI { public static ComponentUI createUI(JComponent c) { return new WindowsPopupMenuSeparatorUI(); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuUI.java index 1361286df4a..1c85cfebd94 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsPopupMenuUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ import static sun.swing.SwingUtilities2.BASICMENUITEMUI_MAX_TEXT_OFFSET; * * @author Igor Kushnirskiy */ -public final class WindowsPopupMenuUI extends BasicPopupMenuUI { +public class WindowsPopupMenuUI extends BasicPopupMenuUI { static MnemonicListener mnemonicListener = null; static final Object GUTTER_OFFSET_KEY = diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java index 9cc7d277ff1..5440b98cd1b 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin; * * @author Michael C. 
Albers */ -public final class WindowsProgressBarUI extends BasicProgressBarUI +public class WindowsProgressBarUI extends BasicProgressBarUI { private Rectangle previousFullBox; diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java index 78768c29ab3..2ec78341c2a 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ import com.sun.java.swing.plaf.windows.TMSchema.State; /** * Windows rendition of the component. */ -public final class WindowsRadioButtonMenuItemUI extends BasicRadioButtonMenuItemUI { +public class WindowsRadioButtonMenuItemUI extends BasicRadioButtonMenuItemUI { final WindowsMenuItemUIAccessor accessor = new WindowsMenuItemUIAccessor() { diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRootPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRootPaneUI.java index 5e08dcf5605..d41fd9421e4 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRootPaneUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRootPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ import sun.swing.MnemonicHandler; * @author Mark Davidson * @since 1.4 */ -public final class WindowsRootPaneUI extends BasicRootPaneUI { +public class WindowsRootPaneUI extends BasicRootPaneUI { private static final WindowsRootPaneUI windowsRootPaneUI = new WindowsRootPaneUI(); static final AltProcessor altProcessor = new AltProcessor(); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollBarUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollBarUI.java index 04a9f2e97cf..2755f3543f1 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollBarUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin; /** * Windows rendition of the component. 
*/ -public final class WindowsScrollBarUI extends BasicScrollBarUI { +public class WindowsScrollBarUI extends BasicScrollBarUI { private Grid thumbGrid; private Grid highlightGrid; private Dimension horizontalThumbSize; diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollPaneUI.java index 48e7a8c02fb..56b8eb1004e 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollPaneUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsScrollPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,5 +30,5 @@ import javax.swing.plaf.basic.BasicScrollPaneUI; /** * Windows rendition of the component. */ -public final class WindowsScrollPaneUI extends BasicScrollPaneUI +public class WindowsScrollPaneUI extends BasicScrollPaneUI {} diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSeparatorUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSeparatorUI.java index 2a2caef60d2..12eaa33872c 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSeparatorUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSeparatorUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,4 +30,4 @@ import javax.swing.plaf.basic.*; /** * Windows Separator. */ -public final class WindowsSeparatorUI extends BasicSeparatorUI { } +public class WindowsSeparatorUI extends BasicSeparatorUI { } diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSliderUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSliderUI.java index 731775a2575..cfc509babf4 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSliderUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSliderUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin; /** * Windows rendition of the component. */ -public final class WindowsSliderUI extends BasicSliderUI +public class WindowsSliderUI extends BasicSliderUI { private boolean rollover = false; private boolean pressed = false; diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSpinnerUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSpinnerUI.java index a8e2a2ddcf1..8934bf9ff21 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSpinnerUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSpinnerUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ import static com.sun.java.swing.plaf.windows.TMSchema.State; import static com.sun.java.swing.plaf.windows.XPStyle.Skin; -public final class WindowsSpinnerUI extends BasicSpinnerUI { +public class WindowsSpinnerUI extends BasicSpinnerUI { public static ComponentUI createUI(JComponent c) { return new WindowsSpinnerUI(); } diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneDivider.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneDivider.java index a132756bbee..26cd1bd8c2d 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneDivider.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneDivider.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ import javax.swing.plaf.basic.BasicSplitPaneUI; * @author Jeff Dinkins */ @SuppressWarnings("serial") // Superclass is not serializable across versions -public final class WindowsSplitPaneDivider extends BasicSplitPaneDivider +public class WindowsSplitPaneDivider extends BasicSplitPaneDivider { /** diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneUI.java index 481fa466a5b..b67ab22f48f 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsSplitPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import javax.swing.plaf.basic.BasicSplitPaneUI; /** * Windows rendition of the component. */ -public final class WindowsSplitPaneUI extends BasicSplitPaneUI +public class WindowsSplitPaneUI extends BasicSplitPaneUI { public WindowsSplitPaneUI() { diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTabbedPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTabbedPaneUI.java index 874b5c65c6e..da8e8b9d385 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTabbedPaneUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTabbedPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin; /** * Windows rendition of the component. */ -public final class WindowsTabbedPaneUI extends BasicTabbedPaneUI { +public class WindowsTabbedPaneUI extends BasicTabbedPaneUI { /** * Keys to use for forward focus traversal when the JComponent is * managing focus. 
diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTableHeaderUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTableHeaderUI.java index de8f18b4ea1..1db0050f162 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTableHeaderUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTableHeaderUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,7 @@ import static com.sun.java.swing.plaf.windows.TMSchema.Part; import static com.sun.java.swing.plaf.windows.TMSchema.State; import static com.sun.java.swing.plaf.windows.XPStyle.Skin; -public final class WindowsTableHeaderUI extends BasicTableHeaderUI { +public class WindowsTableHeaderUI extends BasicTableHeaderUI { private TableCellRenderer originalHeaderRenderer; public static ComponentUI createUI(JComponent h) { diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextAreaUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextAreaUI.java index 78cceff2a0c..7c9abb12e05 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextAreaUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextAreaUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import javax.swing.text.Caret; /** * Windows rendition of the component. */ -public final class WindowsTextAreaUI extends BasicTextAreaUI { +public class WindowsTextAreaUI extends BasicTextAreaUI { /** * Creates the object to use for a caret. By default an * instance of WindowsCaret is created. This method diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextFieldUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextFieldUI.java index 5846dcb9f09..9920ed371d8 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextFieldUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextFieldUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,7 +62,7 @@ import javax.swing.text.Position; * * @author Timothy Prinzing */ -public final class WindowsTextFieldUI extends BasicTextFieldUI +public class WindowsTextFieldUI extends BasicTextFieldUI { /** * Creates a UI for a JTextField. diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextPaneUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextPaneUI.java index 2c645903e51..d1418205385 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextPaneUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTextPaneUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import javax.swing.text.Caret; /** * Windows rendition of the component. */ -public final class WindowsTextPaneUI extends BasicTextPaneUI +public class WindowsTextPaneUI extends BasicTextPaneUI { /** * Creates a UI for a JTextPane. diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToggleButtonUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToggleButtonUI.java index 67eb5c1d6a0..a612b3f392e 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToggleButtonUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToggleButtonUI.java @@ -43,7 +43,7 @@ import javax.swing.plaf.basic.BasicToggleButtonUI; * * @author Jeff Dinkins */ -public final class WindowsToggleButtonUI extends BasicToggleButtonUI +public class WindowsToggleButtonUI extends BasicToggleButtonUI { protected int dashedRectGapX; protected int dashedRectGapY; diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarSeparatorUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarSeparatorUI.java index 47175b83d30..1707ce5a80c 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarSeparatorUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarSeparatorUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ import static com.sun.java.swing.plaf.windows.XPStyle.Skin; * * @author Mark Davidson */ -public final class WindowsToolBarSeparatorUI extends BasicToolBarSeparatorUI { +public class WindowsToolBarSeparatorUI extends BasicToolBarSeparatorUI { public static ComponentUI createUI( JComponent c ) { return new WindowsToolBarSeparatorUI(); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarUI.java index 025c30c5c96..4e2cf42bf5d 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsToolBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ import javax.swing.plaf.basic.BasicToolBarUI; import static com.sun.java.swing.plaf.windows.TMSchema.Part; -public final class WindowsToolBarUI extends BasicToolBarUI { +public class WindowsToolBarUI extends BasicToolBarUI { public static ComponentUI createUI(JComponent c) { return new WindowsToolBarUI(); diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTreeUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTreeUI.java index 78384bbd18a..26edfb978bd 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTreeUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsTreeUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -167,7 +167,7 @@ public class WindowsTreeUI extends BasicTreeUI { * The plus sign button icon */ @SuppressWarnings("serial") // Superclass is not serializable across versions - public static final class CollapsedIcon extends ExpandedIcon { + public static class CollapsedIcon extends ExpandedIcon { public static Icon createCollapsedIcon() { return new CollapsedIcon(); } @@ -185,7 +185,7 @@ public class WindowsTreeUI extends BasicTreeUI { } @SuppressWarnings("serial") // Superclass is not serializable across versions - public final class WindowsTreeCellRenderer extends DefaultTreeCellRenderer { + public class WindowsTreeCellRenderer extends DefaultTreeCellRenderer { /** * Configures the renderer based on the passed in components. diff --git a/src/java.desktop/windows/classes/sun/awt/windows/WPrinterJob.java b/src/java.desktop/windows/classes/sun/awt/windows/WPrinterJob.java index 1da9db3a35b..b238921cb77 100644 --- a/src/java.desktop/windows/classes/sun/awt/windows/WPrinterJob.java +++ b/src/java.desktop/windows/classes/sun/awt/windows/WPrinterJob.java @@ -1734,7 +1734,9 @@ public final class WPrinterJob extends RasterPrinterJob attributes.add(new PageRanges(from, to)); setPageRange(from, to); } else { - attributes.remove(PageRanges.class); + // Sets default values for PageRange attribute and setPageRange + attributes.add(new PageRanges(1, + Integer.MAX_VALUE)); setPageRange(Pageable.UNKNOWN_NUMBER_OF_PAGES, Pageable.UNKNOWN_NUMBER_OF_PAGES); } diff --git a/src/java.desktop/windows/classes/sun/awt/windows/WWindowPeer.java b/src/java.desktop/windows/classes/sun/awt/windows/WWindowPeer.java index f678be9f1ab..f028e0fbec5 100644 --- a/src/java.desktop/windows/classes/sun/awt/windows/WWindowPeer.java +++ 
b/src/java.desktop/windows/classes/sun/awt/windows/WWindowPeer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,6 @@ import java.util.LinkedList; import java.util.List; import sun.awt.AWTAccessor; -import sun.awt.AppContext; import sun.awt.DisplayChangedListener; import sun.awt.SunToolkit; import sun.awt.TimedWindowEvent; @@ -84,26 +83,12 @@ public class WWindowPeer extends WPanelPeer implements WindowPeer, private TranslucentWindowPainter painter; /* - * A key used for storing a list of active windows in AppContext. The value - * is a list of windows, sorted by the time of activation: later a window is - * activated, greater its index is in the list. - */ - private static final StringBuffer ACTIVE_WINDOWS_KEY = - new StringBuffer("active_windows_list"); - - /* - * Listener for 'activeWindow' KFM property changes. It is added to each - * AppContext KFM. See ActiveWindowListener inner class below. + * Listener for 'activeWindow' KFM property changes. + * See ActiveWindowListener inner class below. */ private static PropertyChangeListener activeWindowListener = new ActiveWindowListener(); - /* - * The object is a listener for the AppContext.GUI_DISPOSED property. - */ - private static final PropertyChangeListener guiDisposedListener = - new GuiDisposedListener(); - /* * Called (on the Toolkit thread) before the appropriate * WindowStateEvent is posted to the EventQueue. 
@@ -116,18 +101,17 @@ public class WWindowPeer extends WPanelPeer implements WindowPeer, private static native void initIDs(); static { initIDs(); + KeyboardFocusManager kfm = KeyboardFocusManager.getCurrentKeyboardFocusManager(); + kfm.addPropertyChangeListener("activeWindow", activeWindowListener); } + static List activeWindows = new LinkedList(); // WComponentPeer overrides @Override @SuppressWarnings("unchecked") protected void disposeImpl() { - AppContext appContext = SunToolkit.targetToAppContext(target); - synchronized (appContext) { - List l = (List)appContext.get(ACTIVE_WINDOWS_KEY); - if (l != null) { - l.remove(this); - } + synchronized (activeWindows) { + activeWindows.remove(this); } // Remove ourself from the Map of DisplayChangeListeners @@ -222,8 +206,6 @@ public class WWindowPeer extends WPanelPeer implements WindowPeer, Win32GraphicsDevice gd = (Win32GraphicsDevice) gc.getDevice(); gd.addDisplayChangedListener(this); - initActiveWindowsTracking((Window)target); - updateIconImages(); Shape shape = ((Window)target).getShape(); @@ -530,22 +512,15 @@ public class WWindowPeer extends WPanelPeer implements WindowPeer, native void modalEnable(Dialog blocker); /* - * Returns all the ever active windows from the current AppContext. + * Returns all the active windows. * The list is sorted by the time of activation, so the latest * active window is always at the end. 
*/ - @SuppressWarnings("unchecked") public static long[] getActiveWindowHandles(Component target) { - AppContext appContext = SunToolkit.targetToAppContext(target); - if (appContext == null) return null; - synchronized (appContext) { - List l = (List)appContext.get(ACTIVE_WINDOWS_KEY); - if (l == null) { - return null; - } - long[] result = new long[l.size()]; - for (int j = 0; j < l.size(); j++) { - result[j] = l.get(j).getHWnd(); + synchronized (activeWindows) { + long[] result = new long[activeWindows.size()]; + for (int j = 0; j < activeWindows.size(); j++) { + result[j] = activeWindows.get(j).getHWnd(); } return result; } @@ -823,58 +798,11 @@ public class WWindowPeer extends WPanelPeer implements WindowPeer, } } - /* - * The method maps the list of the active windows to the window's AppContext, - * then the method registers ActiveWindowListener, GuiDisposedListener listeners; - * it executes the initilialization only once per AppContext. - */ - @SuppressWarnings("unchecked") - private static void initActiveWindowsTracking(Window w) { - AppContext appContext = AppContext.getAppContext(); - synchronized (appContext) { - List l = (List)appContext.get(ACTIVE_WINDOWS_KEY); - if (l == null) { - l = new LinkedList(); - appContext.put(ACTIVE_WINDOWS_KEY, l); - appContext.addPropertyChangeListener(AppContext.GUI_DISPOSED, guiDisposedListener); - - KeyboardFocusManager kfm = KeyboardFocusManager.getCurrentKeyboardFocusManager(); - kfm.addPropertyChangeListener("activeWindow", activeWindowListener); - } - } - } - - /* - * The GuiDisposedListener class listens for the AppContext.GUI_DISPOSED property, - * it removes the list of the active windows from the disposed AppContext and - * unregisters ActiveWindowListener listener. 
- */ - private static final class GuiDisposedListener implements PropertyChangeListener { - @Override - public void propertyChange(PropertyChangeEvent e) { - boolean isDisposed = (Boolean)e.getNewValue(); - if (isDisposed != true) { - if (log.isLoggable(PlatformLogger.Level.FINE)) { - log.fine(" Assertion (newValue != true) failed for AppContext.GUI_DISPOSED "); - } - } - AppContext appContext = AppContext.getAppContext(); - synchronized (appContext) { - appContext.remove(ACTIVE_WINDOWS_KEY); - appContext.removePropertyChangeListener(AppContext.GUI_DISPOSED, this); - - KeyboardFocusManager kfm = KeyboardFocusManager.getCurrentKeyboardFocusManager(); - kfm.removePropertyChangeListener("activeWindow", activeWindowListener); - } - } - } - /* * Static inner class, listens for 'activeWindow' KFM property changes and - * updates the list of active windows per AppContext, so the latest active - * window is always at the end of the list. The list is stored in AppContext. + * updates the list of active windows so the latest active + * window is always at the end of the list. 
*/ - @SuppressWarnings("unchecked") private static final class ActiveWindowListener implements PropertyChangeListener { @Override public void propertyChange(PropertyChangeEvent e) { @@ -882,15 +810,11 @@ public class WWindowPeer extends WPanelPeer implements WindowPeer, if (w == null) { return; } - AppContext appContext = SunToolkit.targetToAppContext(w); - synchronized (appContext) { + synchronized (activeWindows) { WWindowPeer wp = AWTAccessor.getComponentAccessor().getPeer(w); // add/move wp to the end of the list - List l = (List)appContext.get(ACTIVE_WINDOWS_KEY); - if (l != null) { - l.remove(wp); - l.add(wp); - } + activeWindows.remove(wp); + activeWindows.add(wp); } } } diff --git a/src/java.desktop/windows/native/libawt/windows/awt_Component.cpp b/src/java.desktop/windows/native/libawt/windows/awt_Component.cpp index b67c5dfcf8d..eca8290a1aa 100644 --- a/src/java.desktop/windows/native/libawt/windows/awt_Component.cpp +++ b/src/java.desktop/windows/native/libawt/windows/awt_Component.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -178,7 +178,6 @@ jfieldID AwtComponent::parentID; jfieldID AwtComponent::graphicsConfigID; jfieldID AwtComponent::peerGCID; jfieldID AwtComponent::focusableID; -jfieldID AwtComponent::appContextID; jfieldID AwtComponent::cursorID; jfieldID AwtComponent::hwndID; @@ -6573,11 +6572,6 @@ Java_java_awt_Component_initIDs(JNIEnv *env, jclass cls) DASSERT(AwtComponent::focusableID); CHECK_NULL(AwtComponent::focusableID); - AwtComponent::appContextID = env->GetFieldID(cls, "appContext", - "Lsun/awt/AppContext;"); - DASSERT(AwtComponent::appContextID); - CHECK_NULL(AwtComponent::appContextID); - AwtComponent::peerGCID = env->GetFieldID(peerCls, "winGraphicsConfig", "Lsun/awt/Win32GraphicsConfig;"); DASSERT(AwtComponent::peerGCID); diff --git a/src/java.desktop/windows/native/libawt/windows/awt_Component.h b/src/java.desktop/windows/native/libawt/windows/awt_Component.h index 740eb8c72f9..1246f6cb06e 100644 --- a/src/java.desktop/windows/native/libawt/windows/awt_Component.h +++ b/src/java.desktop/windows/native/libawt/windows/awt_Component.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,7 +112,6 @@ public: static jfieldID graphicsConfigID; static jfieldID peerGCID; static jfieldID focusableID; - static jfieldID appContextID; static jfieldID hwndID; static jmethodID getFontMID; diff --git a/src/java.desktop/windows/native/libawt/windows/awt_PrintJob.cpp b/src/java.desktop/windows/native/libawt/windows/awt_PrintJob.cpp index 8d016d8b39f..b18fa5a7e2c 100644 --- a/src/java.desktop/windows/native/libawt/windows/awt_PrintJob.cpp +++ b/src/java.desktop/windows/native/libawt/windows/awt_PrintJob.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -522,7 +522,6 @@ Java_sun_awt_windows_WPageDialogPeer__1show(JNIEnv *env, jobject peer) AwtComponent *awtParent = (parent != NULL) ? (AwtComponent *)JNI_GET_PDATA(parent) : NULL; HWND hwndOwner = awtParent ? 
awtParent->GetHWnd() : NULL; - jboolean doIt = JNI_FALSE; PAGESETUPDLG setup; memset(&setup, 0, sizeof(setup)); @@ -578,7 +577,7 @@ Java_sun_awt_windows_WPageDialogPeer__1show(JNIEnv *env, jobject peer) */ if ((setup.hDevMode == NULL) && (setup.hDevNames == NULL)) { CLEANUP_SHOW; - return doIt; + return JNI_FALSE; } } else { int measure = PSD_INTHOUSANDTHSOFINCHES; @@ -606,7 +605,7 @@ Java_sun_awt_windows_WPageDialogPeer__1show(JNIEnv *env, jobject peer) pageFormatToSetup(env, self, page, &setup, AwtPrintControl::getPrintDC(env, self)); if (env->ExceptionCheck()) { CLEANUP_SHOW; - return doIt; + return JNI_FALSE; } setup.lpfnPageSetupHook = reinterpret_cast(pageDlgHook); @@ -615,89 +614,91 @@ Java_sun_awt_windows_WPageDialogPeer__1show(JNIEnv *env, jobject peer) AwtDialog::CheckInstallModalHook(); BOOL ret = ::PageSetupDlg(&setup); - if (ret) { - - jobject paper = getPaper(env, page); - if (paper == NULL) { - CLEANUP_SHOW; - return doIt; - } - int units = setup.Flags & PSD_INTHOUSANDTHSOFINCHES ? - MM_HIENGLISH : - MM_HIMETRIC; - POINT paperSize; - RECT margins; - jint orientation; - - /* The printer may have been changed, and we track that change, - * but then need to get a new DC for the current printer so that - * we validate the paper size correctly - */ - if (setup.hDevNames != NULL) { - DEVNAMES* names = (DEVNAMES*)::GlobalLock(setup.hDevNames); - if (names != NULL) { - LPTSTR printer = (LPTSTR)names+names->wDeviceOffset; - SAVE_CONTROLWORD - HDC newDC = ::CreateDC(TEXT("WINSPOOL"), printer, NULL, NULL); - RESTORE_CONTROLWORD - if (newDC != NULL) { - HDC oldDC = AwtPrintControl::getPrintDC(env, self); - if (oldDC != NULL) { - ::DeleteDC(oldDC); - } - } - AwtPrintControl::setPrintDC(env, self, newDC); - } - ::GlobalUnlock(setup.hDevNames); - } - - /* Get the Windows paper and margins description. 
- */ - retrievePaperInfo(&setup, &paperSize, &margins, &orientation, - AwtPrintControl::getPrintDC(env, self)); - - /* Convert the Windows' paper and margins description - * and place them into a Paper instance. - */ - setPaperValues(env, paper, &paperSize, &margins, units); - if (env->ExceptionCheck()) { - CLEANUP_SHOW; - return doIt; - } - /* - * Put the updated Paper instance and the orientation into - * the PageFormat. - */ - setPaper(env, page, paper); - if (env->ExceptionCheck()) { - CLEANUP_SHOW; - return doIt; - } - setPageFormatOrientation(env, page, orientation); - if (env->ExceptionCheck()) { - CLEANUP_SHOW; - return JNI_FALSE; - } - if (setup.hDevMode != NULL) { - DEVMODE *devmode = (DEVMODE *)::GlobalLock(setup.hDevMode); - if (devmode != NULL) { - if (devmode->dmFields & DM_PAPERSIZE) { - jboolean err = setPrintPaperSize(env, self, devmode->dmPaperSize); - if (err) { - CLEANUP_SHOW; - return doIt; - } - } - } - ::GlobalUnlock(setup.hDevMode); - } - doIt = JNI_TRUE; - } AwtDialog::CheckUninstallModalHook(); - AwtDialog::ModalActivateNextWindow(NULL, target, peer); + if (!ret) { + CLEANUP_SHOW; + return JNI_FALSE; + } + + jobject paper = getPaper(env, page); + if (paper == NULL) { + CLEANUP_SHOW; + return JNI_FALSE; + } + int units = setup.Flags & PSD_INTHOUSANDTHSOFINCHES ? 
+ MM_HIENGLISH : + MM_HIMETRIC; + POINT paperSize; + RECT margins; + jint orientation; + + /* The printer may have been changed, and we track that change, + * but then need to get a new DC for the current printer so that + * we validate the paper size correctly + */ + if (setup.hDevNames != NULL) { + DEVNAMES* names = (DEVNAMES*)::GlobalLock(setup.hDevNames); + if (names != NULL) { + LPTSTR printer = (LPTSTR)names+names->wDeviceOffset; + SAVE_CONTROLWORD + HDC newDC = ::CreateDC(TEXT("WINSPOOL"), printer, NULL, NULL); + RESTORE_CONTROLWORD + if (newDC != NULL) { + HDC oldDC = AwtPrintControl::getPrintDC(env, self); + if (oldDC != NULL) { + ::DeleteDC(oldDC); + } + } + AwtPrintControl::setPrintDC(env, self, newDC); + } + ::GlobalUnlock(setup.hDevNames); + } + + /* Get the Windows paper and margins description. + */ + retrievePaperInfo(&setup, &paperSize, &margins, &orientation, + AwtPrintControl::getPrintDC(env, self)); + + /* Convert the Windows' paper and margins description + * and place them into a Paper instance. + */ + setPaperValues(env, paper, &paperSize, &margins, units); + if (env->ExceptionCheck()) { + CLEANUP_SHOW; + return JNI_FALSE; + } + /* + * Put the updated Paper instance and the orientation into + * the PageFormat. 
+ */ + setPaper(env, page, paper); + if (env->ExceptionCheck()) { + CLEANUP_SHOW; + return JNI_FALSE; + } + setPageFormatOrientation(env, page, orientation); + if (env->ExceptionCheck()) { + CLEANUP_SHOW; + return JNI_FALSE; + } + if (setup.hDevMode != NULL) { + DEVMODE *devmode = (DEVMODE *)::GlobalLock(setup.hDevMode); + if (devmode != NULL) { + if (devmode->dmFields & DM_PAPERSIZE) { + jboolean err = setPrintPaperSize(env, self, devmode->dmPaperSize); + if (err) { + ::GlobalUnlock(setup.hDevMode); + CLEANUP_SHOW; + return JNI_FALSE; + } + } + } + ::GlobalUnlock(setup.hDevMode); + } + HGLOBAL oldG = AwtPrintControl::getPrintHDMode(env, self); if (setup.hDevMode != oldG) { AwtPrintControl::setPrintHDMode(env, self, setup.hDevMode); @@ -710,7 +711,7 @@ Java_sun_awt_windows_WPageDialogPeer__1show(JNIEnv *env, jobject peer) CLEANUP_SHOW; - return doIt; + return JNI_TRUE; CATCH_BAD_ALLOC_RET(0); } diff --git a/src/java.instrument/share/native/libinstrument/JavaExceptions.c b/src/java.instrument/share/native/libinstrument/JavaExceptions.c index 0a787ce4150..45c31e329d0 100644 --- a/src/java.instrument/share/native/libinstrument/JavaExceptions.c +++ b/src/java.instrument/share/native/libinstrument/JavaExceptions.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -68,9 +68,20 @@ forceFallback(jthrowable potentialException) { jboolean initializeFallbackError(JNIEnv* jnienv) { jplis_assert(isSafeForJNICalls(jnienv)); - sFallbackInternalError = createInternalError(jnienv, NULL); + jthrowable localRef = createInternalError(jnienv, NULL); + if (localRef == NULL) { + return JNI_FALSE; + } + + jthrowable globalRef = (*jnienv)->NewGlobalRef(jnienv, localRef); + if (globalRef == NULL) { + return JNI_FALSE; + } + + sFallbackInternalError = globalRef; jplis_assert(isSafeForJNICalls(jnienv)); - return (sFallbackInternalError != NULL); + + return JNI_TRUE; } diff --git a/src/java.logging/share/classes/java/util/logging/LogManager.java b/src/java.logging/share/classes/java/util/logging/LogManager.java index 9c9c708a062..102f4bac6e4 100644 --- a/src/java.logging/share/classes/java/util/logging/LogManager.java +++ b/src/java.logging/share/classes/java/util/logging/LogManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -736,41 +736,56 @@ public class LogManager { logger.setLevel(level); } - // instantiation of the handler is done in the LogManager.addLogger - // implementation as a handler class may be only visible to LogManager - // subclass for the custom log manager case - processParentHandlers(logger, name, VisitedLoggers.NEVER); + // We need to make sure that loggers created by processParentHandlers + // will not be garbage collected before the child/parent + // pointers are updated. 
We use an ArrayList to temporarily + // store these loggers, until the parent/child relationship + // have been updated + final List saved = new ArrayList<>(); + try { - // Find the new node and its parent. - LogNode node = getNode(name); - node.loggerRef = ref; - Logger parent = null; - LogNode nodep = node.parent; - while (nodep != null) { - LoggerWeakRef nodeRef = nodep.loggerRef; - if (nodeRef != null) { - parent = nodeRef.get(); - if (parent != null) { - break; + // always return false, to make sure we process all loggers from + // root to child. + Predicate visited = (l) -> saved.add(l) && false; + + // instantiation of the handler is done in the LogManager.addLogger + // implementation as a handler class may be only visible to LogManager + // subclass for the custom log manager case + processParentHandlers(logger, name, visited); + + // Find the new node and its parent. + LogNode node = getNode(name); + node.loggerRef = ref; + Logger parent = null; + LogNode nodep = node.parent; + while (nodep != null) { + LoggerWeakRef nodeRef = nodep.loggerRef; + if (nodeRef != null) { + parent = nodeRef.get(); + if (parent != null) { + break; + } } + nodep = nodep.parent; } - nodep = nodep.parent; - } - if (parent != null) { - logger.setParent(parent); - } - // Walk over the children and tell them we are their new parent. - node.walkAndSetParent(logger); - // new LogNode is ready so tell the LoggerWeakRef about it - ref.setNode(node); + if (parent != null) { + logger.setParent(parent); + } + // Walk over the children and tell them we are their new parent. 
+ node.walkAndSetParent(logger); + // new LogNode is ready so tell the LoggerWeakRef about it + ref.setNode(node); - // Do not publish 'ref' in namedLoggers before the logger tree - // is fully updated - because the named logger will be visible as - // soon as it is published in namedLoggers (findLogger takes - // benefit of the ConcurrentHashMap implementation of namedLoggers - // to avoid synchronizing on retrieval when that is possible). - namedLoggers.put(name, ref); + // Do not publish 'ref' in namedLoggers before the logger tree + // is fully updated - because the named logger will be visible as + // soon as it is published in namedLoggers (findLogger takes + // benefit of the ConcurrentHashMap implementation of namedLoggers + // to avoid synchronizing on retrieval when that is possible). + namedLoggers.put(name, ref); + } finally { + saved.clear(); + } return true; } @@ -1647,11 +1662,6 @@ public class LogManager { public void clear() { if (visited != null) visited.clear(); } - - // An object that considers that no logger has ever been visited. - // This is used when processParentHandlers is called from - // LoggerContext.addLocalLogger - static final VisitedLoggers NEVER = new VisitedLoggers(null); } diff --git a/src/java.naming/share/classes/com/sun/jndi/ldap/Connection.java b/src/java.naming/share/classes/com/sun/jndi/ldap/Connection.java index dcb739a8697..1e0a924f12c 100644 --- a/src/java.naming/share/classes/com/sun/jndi/ldap/Connection.java +++ b/src/java.naming/share/classes/com/sun/jndi/ldap/Connection.java @@ -57,6 +57,8 @@ import javax.net.ssl.HandshakeCompletedListener; import javax.net.ssl.SSLPeerUnverifiedException; import javax.security.sasl.SaslException; +import jdk.internal.misc.InnocuousThread; + /** * A thread that creates a connection to an LDAP server. * After the connection, the thread reads from the connection. @@ -112,9 +114,6 @@ import javax.security.sasl.SaslException; * for v2. 
* %%% made public for access by LdapSasl %%% * - * @author Vincent Ryan - * @author Rosanna Lee - * @author Jagane Sundar */ public final class Connection implements Runnable { @@ -254,7 +253,7 @@ public final class Connection implements Runnable { throw ce; } - worker = new Thread(this); + worker = InnocuousThread.newSystemThread("LDAP Connection", this); worker.setDaemon(true); worker.start(); } @@ -912,7 +911,7 @@ public final class Connection implements Runnable { // //////////////////////////////////////////////////////////////////////////// - + @Override public void run() { byte inbuf[]; // Buffer for reading incoming bytes int inMsgId; // Message id of incoming response diff --git a/src/java.naming/share/classes/com/sun/jndi/ldap/EventQueue.java b/src/java.naming/share/classes/com/sun/jndi/ldap/EventQueue.java index 4f1cb9ec6a7..7d45d058c68 100644 --- a/src/java.naming/share/classes/com/sun/jndi/ldap/EventQueue.java +++ b/src/java.naming/share/classes/com/sun/jndi/ldap/EventQueue.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,13 +36,13 @@ import javax.naming.event.NamingListener; import javax.naming.ldap.UnsolicitedNotificationEvent; import javax.naming.ldap.UnsolicitedNotificationListener; +import jdk.internal.misc.InnocuousThread; + /** * Package private class used by EventSupport to dispatch events. * This class implements an event queue, and a dispatcher thread that * dequeues and dispatches events from the queue. * - * Pieces stolen from sun.misc.Queue. 
- * * @author Bill Shannon (from javax.mail.event) * @author Rosanna Lee (modified for JNDI-related events) */ @@ -71,7 +71,7 @@ final class EventQueue implements Runnable { // package private EventQueue() { - qThread = new Thread(this); + qThread = InnocuousThread.newSystemThread("LDAP Event Dispatcher", this); qThread.setDaemon(true); // not a user thread qThread.start(); } @@ -141,6 +141,7 @@ final class EventQueue implements Runnable { /** * Pull events off the queue and dispatch them. */ + @Override public void run() { QueueElement qe; diff --git a/src/java.naming/share/classes/com/sun/jndi/ldap/NamingEventNotifier.java b/src/java.naming/share/classes/com/sun/jndi/ldap/NamingEventNotifier.java index 40a8173b768..0e30c1c1d38 100644 --- a/src/java.naming/share/classes/com/sun/jndi/ldap/NamingEventNotifier.java +++ b/src/java.naming/share/classes/com/sun/jndi/ldap/NamingEventNotifier.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ import javax.naming.ldap.LdapName; import java.util.Vector; import com.sun.jndi.toolkit.ctx.Continuation; +import jdk.internal.misc.InnocuousThread; /** * Gathers information to generate events by using the Persistent Search @@ -86,7 +87,7 @@ final class NamingEventNotifier implements Runnable { namingListeners = new Vector<>(); namingListeners.addElement(firstListener); - worker = new Thread(this); + worker = InnocuousThread.newSystemThread("LDAP Event Notifier", this); worker.setDaemon(true); // not a user thread worker.start(); } @@ -111,6 +112,7 @@ final class NamingEventNotifier implements Runnable { * For each result, create the appropriate NamingEvent and * queue to be dispatched to listeners. 
*/ + @Override public void run() { try { Continuation cont = new Continuation(); diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/HttpRequestBuilderImpl.java b/src/java.net.http/share/classes/jdk/internal/net/http/HttpRequestBuilderImpl.java index ef0e2b152bb..c39edf878c9 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/HttpRequestBuilderImpl.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/HttpRequestBuilderImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -145,7 +145,7 @@ public class HttpRequestBuilderImpl implements HttpRequest.Builder { @Override public HttpRequestBuilderImpl headers(String... params) { requireNonNull(params); - if (params.length == 0 || params.length % 2 != 0) { + if (params.length % 2 != 0) { throw newIAE("wrong number, %d, of parameters", params.length); } for (int i = 0; i < params.length; i += 2) { diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java index 5e2384dce27..3fc013b4fde 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -203,6 +203,9 @@ final class ConnectionTerminatorImpl implements ConnectionTerminator { // an endpoint has been established (which is OK) return; } + // close the connection ID managers; any in-flight connection ID changes should be ignored. + connection.localConnectionIdManager().close(); + connection.peerConnectionIdManager().close(); endpoint.removeConnection(this.connection); } @@ -434,6 +437,9 @@ final class ConnectionTerminatorImpl implements ConnectionTerminator { final QuicPacket packet = connection.newQuicPacket(keySpace, List.of(toSend)); final ProtectionRecord protectionRecord = ProtectionRecord.single(packet, connection::allocateDatagramForEncryption); + // close the connection ID managers; any in-flight connection ID changes should be ignored. + connection.localConnectionIdManager().close(); + connection.peerConnectionIdManager().close(); // while sending the packet containing the CONNECTION_CLOSE frame, the pushDatagram will // remap the QuicConnectionImpl in QuicEndpoint. 
connection.pushDatagram(protectionRecord); diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/PeerConnIdManager.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/PeerConnIdManager.java index 2bc759a920a..0646026e28b 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/PeerConnIdManager.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/PeerConnIdManager.java @@ -65,6 +65,7 @@ final class PeerConnIdManager { private final QuicConnectionImpl connection; private final String logTag; private final boolean isClient; + private boolean closed; // when true, no more reset tokens are registered private enum State { INITIAL_PKT_NOT_RECEIVED_FROM_PEER, @@ -267,6 +268,7 @@ final class PeerConnIdManager { if (handshakeConnId == null) { throw new IllegalStateException("No handshake peer connection available"); } + if (closed) return; // recreate the conn id with the stateless token this.peerConnectionIds.put(0L, new PeerConnectionId(handshakeConnId.asReadOnlyBuffer(), statelessResetToken)); @@ -283,6 +285,10 @@ final class PeerConnIdManager { public List activeResetTokens() { lock.lock(); try { + // this method is currently only used to remove a connection from the endpoint + // after the connection is closed. + // The below assert can be removed if the method is needed elsewhere. + assert closed; // we only support one active connection ID at the time PeerConnectionId cid = peerConnectionIds.get(activeConnIdSeq); byte[] statelessResetToken = null; @@ -305,7 +311,7 @@ final class PeerConnIdManager { QuicConnectionId getPeerConnId() { lock.lock(); try { - if (activeConnIdSeq < largestReceivedRetirePriorTo) { + if (activeConnIdSeq < largestReceivedRetirePriorTo && !closed) { // stop using the old connection ID switchConnectionId(); } @@ -496,9 +502,11 @@ final class PeerConnIdManager { // connection ids. 
It does however store the peer-issued stateless reset token of a // peer connection id, so we let the endpoint know that the stateless reset token needs // to be forgotten since the corresponding peer connection id is being retired - final byte[] resetTokenToForget = entry.getValue().getStatelessResetToken(); - if (resetTokenToForget != null) { - this.connection.endpoint().forgetStatelessResetToken(resetTokenToForget); + if (seqNumToRetire == activeConnIdSeq) { + final byte[] resetTokenToForget = entry.getValue().getStatelessResetToken(); + if (resetTokenToForget != null) { + this.connection.endpoint().forgetStatelessResetToken(resetTokenToForget); + } } } for (Iterator iterator = gaps.iterator(); iterator.hasNext(); ) { @@ -540,4 +548,13 @@ final class PeerConnIdManager { lock.unlock(); } } + + public void close() { + lock.lock(); + try { + closed = true; + } finally { + lock.unlock(); + } + } } diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java index 41b814a551c..b13d49ead7d 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java @@ -1758,6 +1758,10 @@ public class QuicConnectionImpl extends QuicConnection implements QuicPacketRece return localConnIdManager; } + PeerConnIdManager peerConnectionIdManager() { + return peerConnIdManager; + } + /** * {@return the local connection id} */ diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java index ef342d4cb56..3dee814e1f1 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java @@ -1532,12 +1532,16 @@ public abstract sealed class 
QuicEndpoint implements AutoCloseable */ void removeConnection(final QuicPacketReceiver connection) { if (debug.on()) debug.log("removing connection " + connection); - // remove the connection completely - connection.connectionIds().forEach(connections::remove); - assert !connections.containsValue(connection) : connection; // remove references to this connection from the map which holds the peer issued // reset tokens dropPeerIssuedResetTokensFor(connection); + // remove the connection completely + connection.connectionIds().forEach(connections::remove); + assert !connections.containsValue(connection) : connection; + // Check that if there are no connections, there are no reset tokens either. + // This is safe because connections are added before reset tokens and removed after, + // except when we're closing the endpoint and don't bother with removing tokens. + assert peerIssuedResetTokens.isEmpty() || !connections.isEmpty() || closed : peerIssuedResetTokens; } /** @@ -1587,7 +1591,6 @@ public abstract sealed class QuicEndpoint implements AutoCloseable if (closed) return; final long idleTimeout = connection.peerPtoMs() * 3; // 3 PTO - connection.localConnectionIdManager().close(); DrainingConnection draining = new DrainingConnection(connection.connectionIds(), connection.activeResetTokens(), idleTimeout); // we can ignore stateless reset in the draining state. 
@@ -1626,7 +1629,6 @@ public abstract sealed class QuicEndpoint implements AutoCloseable closingDatagram.flip(); final long idleTimeout = connection.peerPtoMs() * 3; // 3 PTO - connection.localConnectionIdManager().close(); var closingConnection = new ClosingConnection(connection.connectionIds(), connection.activeResetTokens(), idleTimeout, datagram); remapPeerIssuedResetToken(closingConnection); diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/TerminationCause.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/TerminationCause.java index 9e441cf7873..df8c229a000 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/TerminationCause.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/TerminationCause.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -128,6 +128,9 @@ public abstract sealed class TerminationCause { ? 
new IOException("connection terminated") : new IOException(fallbackExceptionMsg); } else if (original instanceof QuicTransportException qte) { + if (qte.getCause() instanceof IOException ioe) { + return ioe; + } return new IOException(qte.getMessage()); } else if (original instanceof IOException ioe) { return ioe; diff --git a/src/jdk.accessibility/windows/classes/com/sun/java/accessibility/internal/AccessBridge.java b/src/jdk.accessibility/windows/classes/com/sun/java/accessibility/internal/AccessBridge.java index c932d0f73ff..718acf6a6b8 100644 --- a/src/jdk.accessibility/windows/classes/com/sun/java/accessibility/internal/AccessBridge.java +++ b/src/jdk.accessibility/windows/classes/com/sun/java/accessibility/internal/AccessBridge.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -120,9 +120,6 @@ import com.sun.java.accessibility.util.AccessibilityEventMonitor; import com.sun.java.accessibility.util.EventQueueMonitor; import com.sun.java.accessibility.util.SwingEventMonitor; import com.sun.java.accessibility.util.Translator; -import sun.awt.AWTAccessor; -import sun.awt.AppContext; -import sun.awt.SunToolkit; /* * Note: This class has to be public. 
It's loaded from the VM like this: @@ -5292,7 +5289,6 @@ public final class AccessBridge { ac = a.getAccessibleContext(); } if (ac != null) { - InvocationUtils.registerAccessibleContext(ac, AppContext.getAppContext()); accessBridge.debugString("[INFO]: AccessibleContext: " + ac); String propertyName = e.getPropertyName(); @@ -5385,11 +5381,9 @@ public final class AccessBridge { if (e.getOldValue() instanceof AccessibleContext) { oldAC = (AccessibleContext) e.getOldValue(); - InvocationUtils.registerAccessibleContext(oldAC, AppContext.getAppContext()); } if (e.getNewValue() instanceof AccessibleContext) { newAC = (AccessibleContext) e.getNewValue(); - InvocationUtils.registerAccessibleContext(newAC, AppContext.getAppContext()); } accessBridge.debugString("[INFO]: - about to call propertyChildChange() old AC: " + oldAC + "new AC: " + newAC); accessBridge.propertyChildChange(e, ac, oldAC, newAC); @@ -5455,8 +5449,6 @@ public final class AccessBridge { prevAC = newAC; accessBridge.debugString("[INFO]: - about to call propertyActiveDescendentChange() AC: " + ac + " old AC: " + oldAC + "new AC: " + newAC); - InvocationUtils.registerAccessibleContext(oldAC, AppContext.getAppContext()); - InvocationUtils.registerAccessibleContext(newAC, AppContext.getAppContext()); accessBridge.propertyActiveDescendentChange(e, ac, oldAC, newAC); } @@ -5493,14 +5485,12 @@ public final class AccessBridge { // selected. The menu itself is selected. 
FocusEvent e = new FocusEvent(penult, FocusEvent.FOCUS_GAINED); AccessibleContext context = penult.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, SunToolkit.targetToAppContext(penult)); accessBridge.focusGained(e, context); } else if (penult instanceof JPopupMenu) { // This is a popup with an item selected FocusEvent e = new FocusEvent(last, FocusEvent.FOCUS_GAINED); AccessibleContext focusedAC = last.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(focusedAC, SunToolkit.targetToAppContext(last)); accessBridge.debugString("[INFO]: - about to call focusGained() AC: " + focusedAC); accessBridge.focusGained(e, focusedAC); } @@ -5511,7 +5501,6 @@ public final class AccessBridge { FocusEvent e = new FocusEvent(focusOwner, FocusEvent.FOCUS_GAINED); AccessibleContext focusedAC = focusOwner.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(focusedAC, SunToolkit.targetToAppContext(focusOwner)); accessBridge.debugString("[INFO]: - about to call focusGained() AC: " + focusedAC); accessBridge.focusGained(e, focusedAC); } @@ -5524,7 +5513,6 @@ public final class AccessBridge { if (a != null) { accessBridge.debugString("[INFO]: - about to call focusLost() AC: " + a.getAccessibleContext()); AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.focusLost(e, context); } } @@ -5538,7 +5526,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.caretUpdate(e, context); } } @@ -5553,7 +5540,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, 
AppContext.getAppContext()); accessBridge.mouseClicked(e, context); } } @@ -5564,7 +5550,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.mouseEntered(e, context); } } @@ -5575,7 +5560,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.mouseExited(e, context); } } @@ -5586,7 +5570,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.mousePressed(e, context); } } @@ -5597,7 +5580,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.mouseReleased(e, context); } } @@ -5611,7 +5593,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.menuCanceled(e, context); } } @@ -5622,7 +5603,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.menuDeselected(e, context); } } @@ -5633,7 +5613,6 @@ public final class AccessBridge { Accessible a = 
Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.menuSelected(e, context); } } @@ -5644,7 +5623,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.popupMenuCanceled(e, context); } } @@ -5655,7 +5633,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.popupMenuWillBecomeInvisible(e, context); } } @@ -5666,7 +5643,6 @@ public final class AccessBridge { Accessible a = Translator.getAccessible(e.getSource()); if (a != null) { AccessibleContext context = a.getAccessibleContext(); - InvocationUtils.registerAccessibleContext(context, AppContext.getAppContext()); accessBridge.popupMenuWillBecomeVisible(e, context); } } @@ -7227,8 +7203,7 @@ public final class AccessBridge { private static class InvocationUtils { /** - * Invokes a {@code Callable} in the {@code AppContext} of the given {@code Accessible} - * and waits for it to finish blocking the caller thread. + * Invokes a {@code Callable} and waits for it to finish blocking the caller thread. * * @param callable the {@code Callable} to invoke * @param accessibleTable the {@code AccessibleExtendedTable} which would be used to find the right context @@ -7246,8 +7221,7 @@ public final class AccessBridge { } /** - * Invokes a {@code Callable} in the {@code AppContext} of the given {@code Accessible} - * and waits for it to finish blocking the caller thread. + * Invokes a {@code Callable} and waits for it to finish blocking the caller thread. 
* * @param callable the {@code Callable} to invoke * @param accessible the {@code Accessible} which would be used to find the right context @@ -7269,8 +7243,7 @@ public final class AccessBridge { } /** - * Invokes a {@code Callable} in the {@code AppContext} of the given {@code Component} - * and waits for it to finish blocking the caller thread. + * Invokes a {@code Callable} and waits for it to finish blocking the caller thread. * * @param callable the {@code Callable} to invoke * @param component the {@code Component} which would be used to find the right context @@ -7281,12 +7254,11 @@ public final class AccessBridge { */ public static T invokeAndWait(final Callable callable, final Component component) { - return invokeAndWait(callable, SunToolkit.targetToAppContext(component)); + return invokeAndWait(callable); } /** - * Invokes a {@code Callable} in the {@code AppContext} mapped to the given {@code AccessibleContext} - * and waits for it to finish blocking the caller thread. + * Invokes a {@code Callable} and waits for it to finish blocking the caller thread. * * @param callable the {@code Callable} to invoke * @param accessibleContext the {@code AccessibleContext} which would be used to determine the right @@ -7297,45 +7269,26 @@ public final class AccessBridge { */ public static T invokeAndWait(final Callable callable, final AccessibleContext accessibleContext) { - AppContext targetContext = AWTAccessor.getAccessibleContextAccessor() - .getAppContext(accessibleContext); - if (targetContext != null) { - return invokeAndWait(callable, targetContext); - } else { - // Normally this should not happen, unmapped context provided and - // the target AppContext is unknown. - - // Try to recover in case the context is a translator. 
- if (accessibleContext instanceof Translator) { - Object source = ((Translator)accessibleContext).getSource(); - if (source instanceof Component) { - return invokeAndWait(callable, (Component)source); - } - } - } - throw new RuntimeException("Unmapped AccessibleContext used to dispatch event: " + accessibleContext); + return invokeAndWait(callable); } - private static T invokeAndWait(final Callable callable, - final AppContext targetAppContext) { + private static T invokeAndWait(final Callable callable) { final CallableWrapper wrapper = new CallableWrapper(callable); try { - invokeAndWait(wrapper, targetAppContext); + invokeAndWait(wrapper); T result = wrapper.getResult(); - updateAppContextMap(result, targetAppContext); return result; } catch (final Exception e) { throw new RuntimeException(e); } } - private static void invokeAndWait(final Runnable runnable, - final AppContext appContext) + private static void invokeAndWait(final Runnable runnable) throws InterruptedException, InvocationTargetException { - EventQueue eq = SunToolkit.getSystemEventQueueImplPP(appContext); Object lock = new Object(); Toolkit source = Toolkit.getDefaultToolkit(); + EventQueue eq = source.getSystemEventQueue(); InvocationEvent event = new InvocationEvent(source, runnable, lock, true); synchronized (lock) { @@ -7349,26 +7302,6 @@ public final class AccessBridge { } } - /** - * Maps the {@code AccessibleContext} to the {@code AppContext} which should be used - * to dispatch events related to the {@code AccessibleContext} - * @param accessibleContext the {@code AccessibleContext} for the mapping - * @param targetContext the {@code AppContext} for the mapping - */ - public static void registerAccessibleContext(final AccessibleContext accessibleContext, - final AppContext targetContext) { - if (accessibleContext != null) { - AWTAccessor.getAccessibleContextAccessor().setAppContext(accessibleContext, targetContext); - } - } - - private static void updateAppContextMap(final T 
accessibleContext, - final AppContext targetContext) { - if (accessibleContext instanceof AccessibleContext) { - registerAccessibleContext((AccessibleContext)accessibleContext, targetContext); - } - } - private static class CallableWrapper implements Runnable { private final Callable callable; private volatile T object; diff --git a/src/jdk.compiler/share/classes/com/sun/source/util/DocSourcePositions.java b/src/jdk.compiler/share/classes/com/sun/source/util/DocSourcePositions.java index 520943c464d..4eb6d12fd38 100644 --- a/src/jdk.compiler/share/classes/com/sun/source/util/DocSourcePositions.java +++ b/src/jdk.compiler/share/classes/com/sun/source/util/DocSourcePositions.java @@ -28,6 +28,7 @@ package com.sun.source.util; import com.sun.source.doctree.DocCommentTree; import com.sun.source.doctree.DocTree; import com.sun.source.tree.CompilationUnitTree; +import com.sun.source.tree.Tree; /** * Provides methods to obtain the position of a DocTree within a javadoc comment. @@ -59,8 +60,32 @@ public interface DocSourcePositions extends SourcePositions { * position is being sought * @param tree tree for which a position is sought * @return the start position of tree + * @deprecated use {@link #getStartPosition(DocCommentTree, DocTree)} instead */ - long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree); + @Deprecated(since = "27", forRemoval = true) + default long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) { + return getStartPosition(comment, tree); + } + + /** + * {@return the starting position of the given {@link Tree}. If the starting position is not available, returns + * {@link javax.tools.Diagnostic#NOPOS}} + * + *

    The given tree should be under the given comment tree. The returned position must be at the start of the + * yield of this tree, that is for any sub-tree of this tree, the following must hold: + * + *

    + * {@code getStartPosition(comment, tree) <= getStartPosition(comment, subtree)} or
    + * {@code getStartPosition(comment, tree) == NOPOS} or
    + * {@code getStartPosition(comment, subtree) == NOPOS} + *

    + * + * @param comment the comment tree that encloses the tree for which the + * position is being sought + * @param tree tree for which a position is sought + * @since 27 + */ + long getStartPosition(DocCommentTree comment, DocTree tree); /** * Returns the ending position of the tree within the comment within the file. If tree is not found within @@ -91,7 +116,39 @@ public interface DocSourcePositions extends SourcePositions { * position is being sought * @param tree tree for which a position is sought * @return the end position of tree + * @deprecated use {@link #getEndPosition(DocCommentTree, DocTree)} instead */ - long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree); + @Deprecated(since = "27", forRemoval = true) + default long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) { + return getEndPosition(comment, tree); + } + + /** + * {@return the ending position of the given {@link Tree}. If the ending position is not available, returns + * {@link javax.tools.Diagnostic#NOPOS}} + * + *

    The given tree should be under the given comment tree. The returned position must be at the end of the yield + * of this tree, that is for any sub-tree of this tree, the following must hold: + * + *

    + * {@code getEndPosition(comment, tree) >= getEndPosition(comment, subtree)} or
    + * {@code getEndPosition(comment, tree) == NOPOS} or
    + * {@code getEndPosition(comment, subtree) == NOPOS} + *

    + * + * In addition, the following must hold: + * + *

    + * {@code getStartPosition(comment, tree) <= getEndPosition(comment, tree)} or
    + * {@code getStartPosition(comment, tree) == NOPOS} or
    + * {@code getEndPosition(comment, tree) == NOPOS} + *

    + * + * @param comment the comment tree that encloses the tree for which the + * position is being sought + * @param tree tree for which a position is sought + * @since 27 + */ + long getEndPosition(DocCommentTree comment, DocTree tree); } diff --git a/src/jdk.compiler/share/classes/com/sun/source/util/DocTrees.java b/src/jdk.compiler/share/classes/com/sun/source/util/DocTrees.java index 45a452bd0dd..44d9bd89917 100644 --- a/src/jdk.compiler/share/classes/com/sun/source/util/DocTrees.java +++ b/src/jdk.compiler/share/classes/com/sun/source/util/DocTrees.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -195,23 +195,30 @@ public abstract class DocTrees extends Trees { /** * Returns the language model element referred to by the leaf node of the given - * {@link DocTreePath}, or {@code null} if unknown. + * {@link DocTreePath}, or {@code null} if the leaf node of {@code path} does + * not refer to an element. + * * @param path the path for the tree node - * @return the element + * @return the referenced element, or null + * @see #getType(DocTreePath) */ public abstract Element getElement(DocTreePath path); /** * Returns the language model type referred to by the leaf node of the given - * {@link DocTreePath}, or {@code null} if unknown. This method usually - * returns the same value as {@code getElement(path).asType()} for a - * {@code path} argument for which {@link #getElement(DocTreePath)} returns - * a non-null value, but may return a type that includes additional - * information, such as a parameterized generic type instead of a raw type. + * {@link DocTreePath}, or {@code null} if the leaf node of {@code path} does + * not refer to a type. + * + *

    If {@link #getElement(DocTreePath)} returns a non-null value for a given {@code path} + * argument, this method usally returns the same value as {@code getElement(path).asType()}. + * However, there are cases where the returned type includes additional information, + * such as a parameterized generic type instead of a raw type. In other cases, such as with + * primitive or array types, the returned type may not have a corresponding element returned + * by {@code getElement(DocTreePath)}.

    * * @param path the path for the tree node * @return the referenced type, or null - * + * @see #getElement(DocTreePath) * @since 15 */ public abstract TypeMirror getType(DocTreePath path); diff --git a/src/jdk.compiler/share/classes/com/sun/source/util/SourcePositions.java b/src/jdk.compiler/share/classes/com/sun/source/util/SourcePositions.java index b6112fd32e6..3ff6fafe58b 100644 --- a/src/jdk.compiler/share/classes/com/sun/source/util/SourcePositions.java +++ b/src/jdk.compiler/share/classes/com/sun/source/util/SourcePositions.java @@ -53,8 +53,30 @@ public interface SourcePositions { * @param file CompilationUnit in which to find tree * @param tree tree for which a position is sought * @return the start position of tree + * @deprecated use {@link #getStartPosition(Tree)} instead */ - long getStartPosition(CompilationUnitTree file, Tree tree); + @Deprecated(since = "27", forRemoval = true) + default long getStartPosition(CompilationUnitTree file, Tree tree) { + return getStartPosition(tree); + } + + /** + * {@return the starting position of the given {@link Tree}, or if the starting position is not available, returns + * {@link javax.tools.Diagnostic#NOPOS}} + * + *

    The returned position must be at the start of the yield of this tree, that is for any sub-tree of this tree, + * the following must hold: + * + *

    + * {@code getStartPosition(tree) <= getStartPosition(subtree)} or
    + * {@code getStartPosition(tree) == NOPOS} or
    + * {@code getStartPosition(subtree) == NOPOS} + *

    + * + * @param tree tree for which a position is sought + * @since 27 + */ + long getStartPosition(Tree tree); /** * Returns the ending position of tree within file. If tree is not found within @@ -80,7 +102,36 @@ public interface SourcePositions { * @param file CompilationUnit in which to find tree * @param tree tree for which a position is sought * @return the end position of tree + * @deprecated use {@link #getEndPosition(Tree)} instead */ - long getEndPosition(CompilationUnitTree file, Tree tree); + @Deprecated(since = "27", forRemoval = true) + default long getEndPosition(CompilationUnitTree file, Tree tree) { + return getEndPosition(tree); + } + /** + * {@return the ending position of the given {@link Tree}. If the ending position is not available, + * returns {@link javax.tools.Diagnostic#NOPOS}} + * + *

    The returned position must be at the end of the yield of this tree, that is for any sub-tree of this tree, + * the following must hold: + * + *

    + * {@code getEndPosition(tree) >= getEndPosition(subtree)} or
    + * {@code getEndPosition(tree) == NOPOS} or
    + * {@code getEndPosition(subtree) == NOPOS} + *

    + * + * In addition, the following must hold: + * + *

    + * {@code getStartPosition(tree) <= getEndPosition(tree)} or
    + * {@code getStartPosition(tree) == NOPOS} or
    + * {@code getEndPosition(tree) == NOPOS} + *

    + * + * @param tree tree for which a position is sought + * @since 27 + */ + long getEndPosition(Tree tree); } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/api/JavacTrees.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/api/JavacTrees.java index 6bc5d358b6f..41dd904bc8a 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/api/JavacTrees.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/api/JavacTrees.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -233,24 +233,24 @@ public class JavacTrees extends DocTrees { public DocSourcePositions getSourcePositions() { return new DocSourcePositions() { @Override @DefinedBy(Api.COMPILER_TREE) - public long getStartPosition(CompilationUnitTree file, Tree tree) { + public long getStartPosition(Tree tree) { return TreeInfo.getStartPos((JCTree) tree); } @Override @DefinedBy(Api.COMPILER_TREE) - public long getEndPosition(CompilationUnitTree file, Tree tree) { + public long getEndPosition(Tree tree) { return TreeInfo.getEndPos((JCTree) tree); } @Override @DefinedBy(Api.COMPILER_TREE) - public long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) { + public long getStartPosition(DocCommentTree comment, DocTree tree) { DCDocComment dcComment = (DCDocComment) comment; DCTree dcTree = (DCTree) tree; return dcComment.getSourcePosition(dcTree.getStartPosition()); } @Override @DefinedBy(Api.COMPILER_TREE) - public long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) { + public long getEndPosition(DocCommentTree comment, DocTree tree) { DCDocComment dcComment = (DCDocComment) comment; DCTree dcTree = (DCTree) tree; return 
dcComment.getSourcePosition(dcTree.getEndPosition()); @@ -354,22 +354,28 @@ public class JavacTrees extends DocTrees { DocTree tree = path.getLeaf(); if (tree instanceof DCReference dcReference) { JCTree qexpr = dcReference.qualifierExpression; - if (qexpr != null) { + + // Forward references with explicit module name to getElement + if (qexpr != null && dcReference.moduleName == null) { + + Env env = getAttrContext(path.getTreePath()); Log.DeferredDiagnosticHandler deferredDiagnosticHandler = log.new DeferredDiagnosticHandler(); + JavaFileObject prevSource = log.useSource(env.toplevel.sourcefile); + try { - Env env = getAttrContext(path.getTreePath()); - JavaFileObject prevSource = log.useSource(env.toplevel.sourcefile); - try { - Type t = attr.attribType(dcReference.qualifierExpression, env); - if (t != null && !t.isErroneous()) { + Type t = attr.attribType(dcReference.qualifierExpression, env); + if (t != null && !t.isErroneous()) { + if (dcReference.memberName != null) { + Symbol sym = resolveMember(t, (Name) dcReference.memberName, dcReference, env); + return sym == null ? null : sym.type; + } else { return t; } - } finally { - log.useSource(prevSource); } } catch (Abort e) { // may be thrown by Check.completionError in case of bad class file return null; } finally { + log.useSource(prevSource); log.popDiagnosticHandler(deferredDiagnosticHandler); } } @@ -426,14 +432,12 @@ public class JavacTrees extends DocTrees { memberName = (Name) ref.memberName; } else { // Check if qualifierExpression is a type or package, using the methods javac provides. - // If no module name is given we check if qualifierExpression identifies a type. - // If that fails or we have a module name, use that to resolve qualifierExpression to - // a package or type. - Type t = ref.moduleName == null ? 
attr.attribType(ref.qualifierExpression, env) : null; + Type t = attr.attribType(ref.qualifierExpression, env); - if (t == null || t.isErroneous()) { - JCCompilationUnit toplevel = - treeMaker.TopLevel(List.nil()); + if (t == null || t.isErroneous() || + (ref.moduleName != null && !mdlsym.equals(elements.getModuleOf(t.asElement())))) { + + JCCompilationUnit toplevel = treeMaker.TopLevel(List.nil()); toplevel.modle = mdlsym; toplevel.packge = mdlsym.unnamedPackage; Symbol sym = attr.attribIdent(ref.qualifierExpression, toplevel); @@ -447,10 +451,6 @@ public class JavacTrees extends DocTrees { if ((sym.kind == PCK || sym.kind == TYP) && sym.exists()) { tsym = (TypeSymbol) sym; memberName = (Name) ref.memberName; - if (sym.kind == PCK && memberName != null) { - //cannot refer to a package "member" - return null; - } } else { if (modules.modulesInitialized() && ref.moduleName == null && ref.memberName == null) { // package/type does not exist, check if there is a matching module @@ -470,64 +470,22 @@ public class JavacTrees extends DocTrees { } } } else { - Type e = t; - // If this is an array type convert to element type - while (e instanceof ArrayType arrayType) - e = arrayType.elemtype; - tsym = e.tsym; + tsym = switch (t.getKind()) { + case DECLARED, TYPEVAR, PACKAGE, MODULE -> t.tsym; + default -> null; + }; memberName = (Name) ref.memberName; } } if (memberName == null) { return tsym; - } else if (tsym == null || tsym.getKind() == ElementKind.PACKAGE || tsym.getKind() == ElementKind.MODULE) { - return null; // Non-null member name in non-class context - } - - if (tsym.type.isPrimitive()) { + } else if (tsym == null) { return null; } - final List paramTypes; - if (ref.paramTypes == null) - paramTypes = null; - else { - ListBuffer lb = new ListBuffer<>(); - for (List l = (List) ref.paramTypes; l.nonEmpty(); l = l.tail) { - JCTree tree = l.head; - Type t = attr.attribType(tree, env); - lb.add(t); - } - paramTypes = lb.toList(); - } + return resolveMember(tsym.type, 
memberName, ref, env); - ClassSymbol sym = (ClassSymbol) types.skipTypeVars(tsym.type, false).tsym; - boolean explicitType = ref.qualifierExpression != null; - Symbol msym = (memberName == sym.name) - ? findConstructor(sym, paramTypes, true) - : findMethod(sym, memberName, paramTypes, true, explicitType); - - if (msym == null) { - msym = (memberName == sym.name) - ? findConstructor(sym, paramTypes, false) - : findMethod(sym, memberName, paramTypes, false, explicitType); - } - - if (paramTypes != null) { - // explicit (possibly empty) arg list given, so cannot be a field - return msym; - } - - VarSymbol vsym = (ref.paramTypes != null) ? null : findField(sym, memberName, explicitType); - // prefer a field over a method with no parameters - if (vsym != null && - (msym == null || - types.isSubtypeUnchecked(vsym.enclClass().asType(), msym.enclClass().asType()))) { - return vsym; - } else { - return msym; - } } catch (Abort e) { // may be thrown by Check.completionError in case of bad class file return null; } finally { @@ -536,6 +494,54 @@ public class JavacTrees extends DocTrees { } } + private Symbol resolveMember(Type type, Name memberName, DCReference ref, Env env) { + + if (type.isPrimitive() || type.getKind() == TypeKind.PACKAGE || type.getKind() == TypeKind.MODULE) { + return null; + } + + final List paramTypes; + if (ref.paramTypes == null) + paramTypes = null; + else { + ListBuffer lb = new ListBuffer<>(); + for (List l = (List) ref.paramTypes; l.nonEmpty(); l = l.tail) { + JCTree tree = l.head; + Type t = attr.attribType(tree, env); + lb.add(t); + } + paramTypes = lb.toList(); + } + + // skipTypeVars conversion below is needed if type is itself a type variable + ClassSymbol sym = (ClassSymbol) types.skipTypeVars(type, false).tsym; + boolean explicitType = ref.qualifierExpression != null; + Symbol msym = (memberName == sym.name) + ? 
findConstructor(sym, paramTypes, true) + : findMethod(sym, memberName, paramTypes, true, explicitType); + + if (msym == null) { + msym = (memberName == sym.name) + ? findConstructor(sym, paramTypes, false) + : findMethod(sym, memberName, paramTypes, false, explicitType); + } + + if (paramTypes != null) { + // explicit (possibly empty) arg list given, so cannot be a field + return msym; + } + + VarSymbol vsym = (ref.paramTypes != null) ? null : findField(sym, memberName, explicitType); + // prefer a field over a method with no parameters + if (vsym != null && + (msym == null || + types.isSubtypeUnchecked(vsym.enclClass().asType(), msym.enclClass().asType()))) { + return vsym; + } else { + return msym; + } + } + private Symbol attributeParamIdentifier(TreePath path, DCParam paramTag) { Symbol javadocSymbol = getElement(path); if (javadocSymbol == null) diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java index 1c93c37698a..7a8e6c6f4f1 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ import com.sun.tools.javac.resources.CompilerProperties.LintWarnings; import com.sun.tools.javac.resources.CompilerProperties.Warnings; import com.sun.tools.javac.util.Assert; import com.sun.tools.javac.util.Context; +import com.sun.tools.javac.util.JCDiagnostic.DiagnosticFlag; import com.sun.tools.javac.util.JCDiagnostic.DiagnosticPosition; import com.sun.tools.javac.util.JCDiagnostic.Error; import com.sun.tools.javac.util.JCDiagnostic.LintWarning; @@ -150,24 +151,26 @@ public class Preview { /** * Report usage of a preview feature. Usages reported through this method will affect the * set of sourcefiles with dependencies on preview features. + * @param flag a flag to set on the diagnostic * @param pos the position at which the preview feature was used. * @param feature the preview feature used. */ - public void warnPreview(int pos, Feature feature) { - warnPreview(new SimpleDiagnosticPosition(pos), feature); + public void warnPreview(DiagnosticFlag flag, int pos, Feature feature) { + warnPreview(flag, new SimpleDiagnosticPosition(pos), feature); } /** * Report usage of a preview feature. Usages reported through this method will affect the * set of sourcefiles with dependencies on preview features. + * @param flag a flag to set on the diagnostic * @param pos the position at which the preview feature was used. * @param feature the preview feature used. */ - public void warnPreview(DiagnosticPosition pos, Feature feature) { + public void warnPreview(DiagnosticFlag flag, DiagnosticPosition pos, Feature feature) { Assert.check(isEnabled()); Assert.check(isPreview(feature)); markUsesPreview(pos); - log.warning(pos, + log.warning(flag, pos, feature.isPlural() ? 
LintWarnings.PreviewFeatureUsePlural(feature.nameFragment()) : LintWarnings.PreviewFeatureUse(feature.nameFragment())); @@ -263,7 +266,7 @@ public class Preview { log.error(pos, feature.error(source.name)); } if (isEnabled() && isPreview(feature)) { - warnPreview(pos, feature); + warnPreview(null, pos, feature); } } } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Printer.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Printer.java index d9781f19c5d..acd8a35a894 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Printer.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Printer.java @@ -53,6 +53,7 @@ public abstract class Printer implements Type.Visitor, Symbol.Vi List seenCaptured = List.nil(); static final int PRIME = 997; // largest prime less than 1000 + private boolean printingMethodArgs; protected Printer() { } @@ -195,6 +196,9 @@ public abstract class Printer implements Type.Visitor, Symbol.Vi } private String printAnnotations(Type t, boolean prefix) { + if (printingMethodArgs) { + return ""; + } StringBuilder sb = new StringBuilder(); List annos = t.getAnnotationMirrors(); if (!annos.isEmpty()) { @@ -337,27 +341,28 @@ public abstract class Printer implements Type.Visitor, Symbol.Vi * @return localized string representation */ protected String printMethodArgs(List args, boolean varArgs, Locale locale) { - if (!varArgs) { - return visitTypes(args, locale); - } else { - StringBuilder buf = new StringBuilder(); - while (args.tail.nonEmpty()) { - buf.append(visit(args.head, locale)); - args = args.tail; - buf.append(','); - } - if (args.head.hasTag(TypeTag.ARRAY)) { - buf.append(visit(((ArrayType) args.head).elemtype, locale)); - if (args.head.getAnnotationMirrors().nonEmpty()) { - buf.append(' '); - buf.append(args.head.getAnnotationMirrors()); - buf.append(' '); - } - buf.append("..."); + boolean prev = printingMethodArgs; + printingMethodArgs = true; + try { + if (!varArgs) { + return 
visitTypes(args, locale); } else { - buf.append(visit(args.head, locale)); + StringBuilder buf = new StringBuilder(); + while (args.tail.nonEmpty()) { + buf.append(visit(args.head, locale)); + args = args.tail; + buf.append(','); + } + if (args.head.hasTag(TypeTag.ARRAY)) { + buf.append(visit(((ArrayType) args.head).elemtype, locale)); + buf.append("..."); + } else { + buf.append(visit(args.head, locale)); + } + return buf.toString(); } - return buf.toString(); + } finally { + printingMethodArgs = prev; } } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/TypeAnnotations.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/TypeAnnotations.java index 86319f20c73..452d15ed219 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/TypeAnnotations.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/TypeAnnotations.java @@ -50,6 +50,7 @@ import com.sun.tools.javac.code.TypeAnnotationPosition.TypePathEntryKind; import com.sun.tools.javac.code.Symbol.VarSymbol; import com.sun.tools.javac.code.Symbol.MethodSymbol; import com.sun.tools.javac.code.Type.ModuleType; +import com.sun.tools.javac.code.Type.UnionClassType; import com.sun.tools.javac.comp.Annotate; import com.sun.tools.javac.comp.Attr; import com.sun.tools.javac.comp.AttrContext; @@ -61,6 +62,7 @@ import com.sun.tools.javac.tree.JCTree.JCAnnotatedType; import com.sun.tools.javac.tree.JCTree.JCAnnotation; import com.sun.tools.javac.tree.JCTree.JCArrayTypeTree; import com.sun.tools.javac.tree.JCTree.JCBlock; +import com.sun.tools.javac.tree.JCTree.JCCatch; import com.sun.tools.javac.tree.JCTree.JCClassDecl; import com.sun.tools.javac.tree.JCTree.JCExpression; import com.sun.tools.javac.tree.JCTree.JCFieldAccess; @@ -70,6 +72,7 @@ import com.sun.tools.javac.tree.JCTree.JCMethodDecl; import com.sun.tools.javac.tree.JCTree.JCMethodInvocation; import com.sun.tools.javac.tree.JCTree.JCNewArray; import com.sun.tools.javac.tree.JCTree.JCNewClass; +import 
com.sun.tools.javac.tree.JCTree.JCTry; import com.sun.tools.javac.tree.JCTree.JCTypeApply; import com.sun.tools.javac.tree.JCTree.JCTypeIntersection; import com.sun.tools.javac.tree.JCTree.JCTypeParameter; @@ -111,6 +114,7 @@ public class TypeAnnotations { final Symtab syms; final Annotate annotate; final Attr attr; + final Types types; @SuppressWarnings("this-escape") protected TypeAnnotations(Context context) { @@ -120,6 +124,7 @@ public class TypeAnnotations { syms = Symtab.instance(context); annotate = Annotate.instance(context); attr = Attr.instance(context); + types = Types.instance(context); } /** @@ -132,7 +137,32 @@ public class TypeAnnotations { annotate.afterTypes(() -> { JavaFileObject oldSource = log.useSource(env.toplevel.sourcefile); try { - new TypeAnnotationPositions(true).scan(tree); + new TypeAnnotationPositions(null, true).scan(tree); + } finally { + log.useSource(oldSource); + } + }); + } + + public void organizeTypeAnnotationsSignaturesForLocalVarType(final Env env, final JCVariableDecl tree) { + annotate.afterTypes(() -> { + JavaFileObject oldSource = log.useSource(env.toplevel.sourcefile); + try { + TypeAnnotationPositions pos = new TypeAnnotationPositions(env.tree, true); + if (env.tree instanceof JCLambda) { + pos.push(env.tree); + } else { + pos.push(env.enclMethod); + } + Env env1 = env; + while (env1 != null && !env1.tree.hasTag(Tag.CLASSDEF)) { + if (env1.tree instanceof JCLambda l) { + pos.currentLambda = l; + break; + } + env1 = env1.next; + } + pos.scan(tree); } finally { log.useSource(oldSource); } @@ -155,7 +185,7 @@ public class TypeAnnotations { * top-level blocks, and method bodies, and should be called from Attr. 
*/ public void organizeTypeAnnotationsBodies(JCClassDecl tree) { - new TypeAnnotationPositions(false).scan(tree); + new TypeAnnotationPositions(null, false).scan(tree); } public enum AnnotationType { DECLARATION, TYPE, NONE, BOTH } @@ -265,9 +295,11 @@ public class TypeAnnotations { private class TypeAnnotationPositions extends TreeScanner { + private final JCTree contextTree; private final boolean sigOnly; - TypeAnnotationPositions(boolean sigOnly) { + TypeAnnotationPositions(JCTree contextTree, boolean sigOnly) { + this.contextTree = contextTree; this.sigOnly = sigOnly; } @@ -448,21 +480,23 @@ public class TypeAnnotations { Assert.check(tc.position == pos); } - if (type.hasTag(TypeTag.ARRAY)) - return rewriteArrayType(typetree, (ArrayType)type, annotations, onlyTypeAnnotations, pos); + Type ret; - if (type.hasTag(TypeTag.TYPEVAR)) { - return type.annotatedType(onlyTypeAnnotations); + if (type.hasTag(TypeTag.ARRAY)) { + ret = rewriteArrayType(typetree, (ArrayType)type, annotations, onlyTypeAnnotations, pos); + } else if (type.hasTag(TypeTag.TYPEVAR)) { + ret = type.annotatedType(onlyTypeAnnotations); } else if (type.getKind() == TypeKind.UNION) { // There is a TypeKind, but no TypeTag. + UnionClassType ut = (UnionClassType) type; JCTypeUnion tutree = (JCTypeUnion)typetree; JCExpression fst = tutree.alternatives.get(0); Type res = typeWithAnnotations(fst, fst.type, annotations, onlyTypeAnnotations, pos); fst.type = res; - // TODO: do we want to set res as first element in uct.alternatives? - // UnionClassType uct = (com.sun.tools.javac.code.Type.UnionClassType)type; - // Return the un-annotated union-type. 
- return type; + ListBuffer alternatives = new ListBuffer<>(); + alternatives.add(res); + alternatives.addAll(ut.alternatives_field.tail); + ret = new UnionClassType((ClassType) ut.getLub(), alternatives.toList()); } else { Type enclTy = type; Element enclEl = type.asElement(); @@ -542,10 +576,10 @@ public class TypeAnnotations { pos.location = pos.location.appendList(depth.toList()); } - Type ret = typeWithAnnotations(type, enclTy, annotations); - typetree.type = ret; - return ret; + ret = typeWithAnnotations(type, enclTy, annotations); } + typetree.type = ret; + return ret; } /** @@ -1237,7 +1271,17 @@ public class TypeAnnotations { } else if (tree.sym == null) { Assert.error("Visiting tree node before memberEnter"); } else if (tree.sym.getKind() == ElementKind.PARAMETER) { - // Parameters are handled in visitMethodDef or visitLambda. + if (sigOnly) { + if (contextTree instanceof JCCatch c && c.param == tree) { + //exception "parameter": + final TypeAnnotationPosition pos = + TypeAnnotationPosition.exceptionParameter(currentLambda, + tree.pos); + separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + } else { + // (real) parameters are handled in visitMethodDef or visitLambda. 
+ } + } } else if (tree.sym.getKind() == ElementKind.FIELD) { if (sigOnly) { TypeAnnotationPosition pos = @@ -1245,27 +1289,36 @@ public class TypeAnnotations { separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); } } else if (tree.sym.getKind() == ElementKind.LOCAL_VARIABLE) { - final TypeAnnotationPosition pos = - TypeAnnotationPosition.localVariable(currentLambda, - tree.pos); - if (!tree.declaredUsingVar()) { - separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + if (sigOnly && !tree.declaredUsingVar()) { + if (contextTree instanceof JCTry t && t.resources.contains(tree)) { + final TypeAnnotationPosition pos = + TypeAnnotationPosition.resourceVariable(currentLambda, + tree.pos); + separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + } else { + final TypeAnnotationPosition pos = + TypeAnnotationPosition.localVariable(currentLambda, + tree.pos); + if (!tree.declaredUsingVar()) { + separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + } + } } } else if (tree.sym.getKind() == ElementKind.BINDING_VARIABLE) { - final TypeAnnotationPosition pos = - TypeAnnotationPosition.localVariable(currentLambda, - tree.pos); - separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + if (sigOnly) { + final TypeAnnotationPosition pos = + TypeAnnotationPosition.localVariable(currentLambda, + tree.pos); + separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + } } else if (tree.sym.getKind() == ElementKind.EXCEPTION_PARAMETER) { - final TypeAnnotationPosition pos = - TypeAnnotationPosition.exceptionParameter(currentLambda, - tree.pos); - separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + if (sigOnly) { + Assert.error("Should not get variable kind: " + tree.sym.getKind()); + } } else if (tree.sym.getKind() == ElementKind.RESOURCE_VARIABLE) { - final TypeAnnotationPosition pos = - 
TypeAnnotationPosition.resourceVariable(currentLambda, - tree.pos); - separateAnnotationsKinds(tree, tree.vartype, tree.sym.type, tree.sym, pos); + if (sigOnly) { + Assert.error("Should not get variable kind: " + tree.sym.getKind()); + } } else if (tree.sym.getKind() == ElementKind.ENUM_CONSTANT) { // No type annotations can occur here. } else { diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java index 9ffab9fd961..f1c6676d087 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -993,10 +993,12 @@ public class Types { @Override public boolean test(Symbol sym) { + List msyms; return sym.kind == MTH && (sym.flags() & (ABSTRACT | DEFAULT)) == ABSTRACT && !overridesObjectMethod(origin, sym) && - (interfaceCandidates(origin.type, (MethodSymbol)sym).head.flags() & DEFAULT) == 0; + (msyms = interfaceCandidates(origin.type, (MethodSymbol)sym)).nonEmpty() && + (msyms.head.flags() & DEFAULT) == 0; } } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Annotate.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Annotate.java index f865afe11fb..118d761573b 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Annotate.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Annotate.java @@ -861,7 +861,6 @@ public class Annotate { if (!chk.validateAnnotationDeferErrors(annoTree)) log.error(annoTree.pos(), Errors.DuplicateAnnotationInvalidRepeated(origAnnoType)); - c = attributeAnnotation(annoTree, 
targetContainerType, ctx.env); c.setSynthesized(true); @SuppressWarnings("unchecked") diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java index 83b684e1225..89ae68e85ba 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java @@ -1281,6 +1281,7 @@ public class Attr extends JCTree.Visitor { try { annotate.blockAnnotations(); memberEnter.memberEnter(tree, env); + typeAnnotations.organizeTypeAnnotationsSignaturesForLocalVarType(env, tree); } finally { annotate.unblockAnnotations(); } @@ -4226,7 +4227,6 @@ public class Attr extends JCTree.Visitor { } else { type = resultInfo.pt; } - tree.type = tree.var.type = type; BindingSymbol v = new BindingSymbol(tree.var.mods.flags | tree.var.declKind.additionalSymbolFlags, tree.var.name, type, env.info.scope.owner); v.pos = tree.pos; @@ -4244,7 +4244,8 @@ public class Attr extends JCTree.Visitor { annotate.queueScanTreeAndTypeAnnotate(tree.var.vartype, env, v); } annotate.flush(); - result = tree.type; + typeAnnotations.organizeTypeAnnotationsSignaturesForLocalVarType(env, tree.var); + result = tree.type = tree.var.type = v.type; if (v.isUnnamedVariable()) { matchBindings = MatchBindingsComputer.EMPTY; } else { @@ -5269,11 +5270,15 @@ public class Attr extends JCTree.Visitor { public void visitAnnotatedType(JCAnnotatedType tree) { attribAnnotationTypes(tree.annotations, env); - Type underlyingType = attribType(tree.underlyingType, env); - Type annotatedType = underlyingType.preannotatedType(); + Type underlyingType = attribTree(tree.underlyingType, env, resultInfo); + if (underlyingType.getTag() == PACKAGE) { + result = tree.type = underlyingType; + } else { + Type annotatedType = underlyingType.preannotatedType(); - annotate.annotateTypeSecondStage(tree, tree.annotations, annotatedType); - result = tree.type = annotatedType; + 
annotate.annotateTypeSecondStage(tree, tree.annotations, annotatedType); + result = tree.type = annotatedType; + } } public void visitErroneous(JCErroneous tree) { diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java index 862c02ea5f0..1229939c0bf 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java @@ -29,7 +29,6 @@ package com.sun.tools.javac.comp; import com.sun.source.tree.MemberReferenceTree.ReferenceMode; import com.sun.tools.javac.code.*; import com.sun.tools.javac.code.Attribute.TypeCompound; -import com.sun.tools.javac.code.Source.Feature; import com.sun.tools.javac.code.Symbol.*; import com.sun.tools.javac.code.Type.TypeVar; import com.sun.tools.javac.jvm.Target; @@ -49,8 +48,6 @@ import static com.sun.tools.javac.code.TypeTag.VOID; import static com.sun.tools.javac.comp.CompileStates.CompileState; import com.sun.tools.javac.tree.JCTree.JCBreak; -import javax.lang.model.type.TypeKind; - /** This pass translates Generic Java to conventional Java. * *

    This is NOT part of any supported API. @@ -109,7 +106,9 @@ public class TransTypes extends TreeTranslator { if (!types.isSameType(tree.type, target)) { if (!resolve.isAccessible(env, target.tsym)) resolve.logAccessErrorInternal(env, tree, target); - tree = make.TypeCast(make.Type(target), tree).setType(target); + tree = explicitCastTP != null && types.isSameType(target, explicitCastTP) ? + tree : + make.TypeCast(make.Type(target), tree).setType(target); } make.pos = oldpos; return tree; @@ -440,16 +439,29 @@ public class TransTypes extends TreeTranslator { /** Visitor argument: proto-type. */ private Type pt; + /** we use this type to indicate that "upstream" there is an explicit cast to this type, + * this way we can avoid generating redundant type casts. Redundant casts are not + * innocuous as they can trump user provided ones and affect the offset + * calculation of type annotations applied to the user provided type cast. + */ + private Type explicitCastTP; /** Visitor method: perform a type translation on tree. */ public T translate(T tree, Type pt) { + return translate(tree, pt, pt == explicitCastTP ? explicitCastTP : null); + } + + public T translate(T tree, Type pt, Type castTP) { Type prevPt = this.pt; + Type prevCastPT = this.explicitCastTP; try { this.pt = pt; + this.explicitCastTP = castTP; return translate(tree); } finally { this.pt = prevPt; + this.explicitCastTP = prevCastPT; } } @@ -1037,7 +1049,9 @@ public class TransTypes extends TreeTranslator { tree.clazz = translate(tree.clazz, null); Type originalTarget = tree.type; tree.type = erasure(tree.type); - JCExpression newExpression = translate(tree.expr, tree.type); + JCExpression newExpression = tree.clazz.hasTag(Tag.ANNOTATED_TYPE) ? + translate(tree.expr, tree.type, tree.type) : + translate(tree.expr, tree.type); if (newExpression != tree.expr) { JCTypeCast typeCast = newExpression.hasTag(Tag.TYPECAST) ? 
(JCTypeCast) newExpression diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java index b7bf48b4a12..08ba0442781 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,6 +69,7 @@ import com.sun.tools.javac.util.*; import com.sun.tools.javac.util.ByteBuffer.UnderflowException; import com.sun.tools.javac.util.DefinedBy.Api; import com.sun.tools.javac.util.JCDiagnostic.Fragment; +import com.sun.tools.javac.util.Log.DeferredDiagnosticHandler; import static com.sun.tools.javac.code.Flags.*; import static com.sun.tools.javac.code.Kinds.Kind.*; @@ -1366,9 +1367,7 @@ public class ClassReader { else self.fullname = ClassSymbol.formFullName(self.name, self.owner); - if (m != null) { - ((ClassType)sym.type).setEnclosingType(m.type); - } else if ((self.flags_field & STATIC) == 0) { + if ((self.flags_field & STATIC) == 0 && (m == null || (m.flags_field & STATIC) == 0)) { ((ClassType)sym.type).setEnclosingType(c.type); } else { ((ClassType)sym.type).setEnclosingType(Type.noType); @@ -2043,15 +2042,27 @@ public class ClassReader { } Attribute.Compound deproxyCompound(CompoundAnnotationProxy a) { - Type annotationType = resolvePossibleProxyType(a.type); - ListBuffer> buf = new ListBuffer<>(); - for (List> l = a.values; - l.nonEmpty(); - l = l.tail) { - MethodSymbol meth = findAccessMethod(annotationType, l.head.fst); - buf.append(new Pair<>(meth, deproxy(meth.type.getReturnType(), l.head.snd))); + DeferredDiagnosticHandler deferred = log.new 
DeferredDiagnosticHandler(); + Type annotationType = syms.objectType; + try { + annotationType = resolvePossibleProxyType(a.type); + ListBuffer> buf = new ListBuffer<>(); + for (List> l = a.values; + l.nonEmpty(); + l = l.tail) { + MethodSymbol meth = findAccessMethod(annotationType, l.head.fst); + buf.append(new Pair<>(meth, deproxy(meth.type.getReturnType(), l.head.snd))); + } + return new Attribute.Compound(annotationType, buf.toList()); + } finally { + if (!annotationType.tsym.type.hasTag(TypeTag.ERROR)) { + //if the annotation type does not exists + //throw away warnings reported while de-proxying the annotation, + //as the annotation's library is probably missing from the classpath: + deferred.reportDeferredDiagnostics(); + } + log.popDiagnosticHandler(deferred); } - return new Attribute.Compound(annotationType, buf.toList()); } MethodSymbol findAccessMethod(Type container, Name name) { @@ -2146,15 +2157,21 @@ public class ClassReader { failure = ex; } if (enumerator == null) { - if (failure != null) { - log.warning(Warnings.UnknownEnumConstantReason(currentClassFile, - enumTypeSym, - proxy.enumerator, - failure.getDiagnostic())); - } else { - log.warning(Warnings.UnknownEnumConstant(currentClassFile, - enumTypeSym, - proxy.enumerator)); + // The enumerator wasn't found: emit a warning and recover + JavaFileObject prevSource = log.useSource(requestingOwner.classfile); + try { + if (failure != null) { + log.warning(LintWarnings.UnknownEnumConstantReason(currentClassFile, + enumTypeSym, + proxy.enumerator, + failure.getDiagnostic())); + } else { + log.warning(LintWarnings.UnknownEnumConstant(currentClassFile, + enumTypeSym, + proxy.enumerator)); + } + } finally { + log.useSource(prevSource); } result = new Attribute.Enum(enumTypeSym.type, new VarSymbol(0, proxy.enumerator, syms.botType, enumTypeSym)); @@ -2668,6 +2685,7 @@ public class ClassReader { // won't pass the "hasOuterInstance" check above, but those that don't have an // enclosing method (i.e. 
from initializers) will pass that check. boolean local = forceLocal = + currentOwner.owner.kind != TYP || !currentOwner.owner.members().includes(currentOwner, LookupKind.NON_RECURSIVE); if (!currentOwner.name.isEmpty() && !local) type = new MethodType(adjustMethodParams(flags, type.getParameterTypes()), @@ -3019,7 +3037,9 @@ public class ClassReader { * `typevars'. */ protected void enterTypevars(Symbol sym, Type t) { - if (t.getEnclosingType() != null) { + if (sym.owner.kind == MTH) { + enterTypevars(sym.owner, sym.owner.type); + } else if (t.getEnclosingType() != null) { if (!t.getEnclosingType().hasTag(TypeTag.NONE)) { enterTypevars(sym.owner, t.getEnclosingType()); } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java index 94292d9a348..269d2f5de62 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -823,9 +823,9 @@ public class JavaCompiler { c, () -> diagFactory.fragment(Fragments.UserSelectedCompletionFailure), dcfh); } JavaFileObject filename = c.classfile; - JavaFileObject prev = log.useSource(filename); if (tree == null) { + JavaFileObject prev = log.useSource(filename); try { tree = parse(filename, filename.getCharContent(false)); } catch (IOException e) { diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java index babe372e7dc..d8b5b1ddd6b 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -176,7 +176,7 @@ public class JavaTokenizer extends UnicodeReader { lexError(pos, feature.error(source.name)); } else if (preview.isPreview(feature)) { //use of preview feature, warn - preview.warnPreview(pos, feature); + preview.warnPreview(DiagnosticFlag.SYNTAX, pos, feature); } } @@ -1000,7 +1000,7 @@ public class JavaTokenizer extends UnicodeReader { scanIdent(); } else if (digit(pos, 10) >= 0) { scanNumber(pos, 10); - } else if (is((char)EOI) || !isAvailable()) { + } else if (is((char)EOI) && position() + 1 == length() || !isAvailable()) { tk = TokenKind.EOF; pos = position(); } else { @@ -1040,10 +1040,10 @@ public class JavaTokenizer extends UnicodeReader { // Verify that the incidental indentation is consistent. 
Set checks = TextBlockSupport.checkWhitespace(string); if (checks.contains(TextBlockSupport.WhitespaceChecks.INCONSISTENT)) { - log.warning(pos, LintWarnings.InconsistentWhiteSpaceIndentation); + log.warning(DiagnosticFlag.SYNTAX, pos, LintWarnings.InconsistentWhiteSpaceIndentation); } if (checks.contains(TextBlockSupport.WhitespaceChecks.TRAILING)) { - log.warning(pos, LintWarnings.TrailingWhiteSpaceWillBeRemoved); + log.warning(DiagnosticFlag.SYNTAX, pos, LintWarnings.TrailingWhiteSpaceWillBeRemoved); } // Remove incidental indentation. try { diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java index df5da5cb954..b4dfb04766c 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java @@ -772,7 +772,7 @@ public class JavacParser implements Parser { } } else if (token.kind == UNDERSCORE) { if (Feature.UNDERSCORE_IDENTIFIER.allowedInSource(source)) { - log.warning(token.pos, Warnings.UnderscoreAsIdentifier); + log.warning(DiagnosticFlag.SYNTAX, token.pos, Warnings.UnderscoreAsIdentifier); } else if (asVariable) { checkSourceLevel(Feature.UNNAMED_VARIABLES); if (peekToken(LBRACKET)) { @@ -2339,7 +2339,7 @@ public class JavacParser implements Parser { if (allowYieldStatement) { return true; } else { - log.warning(pos, Warnings.InvalidYield); + log.warning(DiagnosticFlag.SYNTAX, pos, Warnings.InvalidYield); } } return false; @@ -3858,35 +3858,35 @@ public class JavacParser implements Parser { if (Feature.LOCAL_VARIABLE_TYPE_INFERENCE.allowedInSource(source)) { return Source.JDK10; } else if (shouldWarn) { - log.warning(pos, Warnings.RestrictedTypeNotAllowed(name, Source.JDK10)); + log.warning(DiagnosticFlag.SYNTAX, pos, Warnings.RestrictedTypeNotAllowed(name, Source.JDK10)); } } if (name == names.yield) { if (allowYieldStatement) { return Source.JDK14; 
} else if (shouldWarn) { - log.warning(pos, Warnings.RestrictedTypeNotAllowed(name, Source.JDK14)); + log.warning(DiagnosticFlag.SYNTAX, pos, Warnings.RestrictedTypeNotAllowed(name, Source.JDK14)); } } if (name == names.record) { if (allowRecords) { return Source.JDK14; } else if (shouldWarn) { - log.warning(pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK14)); + log.warning(DiagnosticFlag.SYNTAX, pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK14)); } } if (name == names.sealed) { if (allowSealedTypes) { return Source.JDK15; } else if (shouldWarn) { - log.warning(pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK15)); + log.warning(DiagnosticFlag.SYNTAX, pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK15)); } } if (name == names.permits) { if (allowSealedTypes) { return Source.JDK15; } else if (shouldWarn) { - log.warning(pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK15)); + log.warning(DiagnosticFlag.SYNTAX, pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK15)); } } return null; @@ -4057,7 +4057,7 @@ public class JavacParser implements Parser { if (source.compareTo(Source.JDK21) >= 0) reportSyntaxError(semiList.first().pos, Errors.ExtraneousSemicolon); else - log.warning(semiList.first().pos, Warnings.ExtraneousSemicolon); + log.warning(DiagnosticFlag.SYNTAX, semiList.first().pos, Warnings.ExtraneousSemicolon); } seenImport = true; defs.append(importDeclaration()); @@ -4074,7 +4074,7 @@ public class JavacParser implements Parser { if (source.compareTo(Source.JDK21) >= 0) reportSyntaxError(semiList.first().pos, Errors.ExtraneousSemicolon); else - log.warning(semiList.first().pos, Warnings.ExtraneousSemicolon); + log.warning(DiagnosticFlag.SYNTAX, semiList.first().pos, Warnings.ExtraneousSemicolon); } ModuleKind kind = ModuleKind.STRONG; if (token.name() == names.open) { @@ -5616,7 +5616,7 @@ public class JavacParser implements Parser { log.error(pos, feature.error(source.name)); } 
else if (preview.isPreview(feature)) { //use of preview feature, warn - preview.warnPreview(pos, feature); + preview.warnPreview(DiagnosticFlag.SYNTAX, pos, feature); } } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/TextBlockSupport.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/TextBlockSupport.java index d099ceadba0..5112849d45f 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/TextBlockSupport.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/TextBlockSupport.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,8 +56,8 @@ class TextBlockSupport { // No need to check indentation if opting out (last line is empty.) char lastChar = string.charAt(string.length() - 1); boolean optOut = lastChar == '\n' || lastChar == '\r'; - // Split string based at line terminators. - String[] lines = string.split("\\R"); + // Split string using JLS text block line terminators: CRLF, CR, or LF. + String[] lines = string.split("\\r\\n|\\r|\\n"); int length = lines.length; // Extract last line. String lastLine = length == 0 ? "" : lines[length - 1]; diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java index 74d082d4b64..11fa3a5aebf 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1104,7 +1104,7 @@ public class JavacProcessingEnvironment implements ProcessingEnvironment, Closea return true; return deferredDiagnosticHandler.getDiagnostics().stream() - .anyMatch(d -> (d.getKind() == Diagnostic.Kind.WARNING && werror) || + .anyMatch(d -> (d.getKind() == Diagnostic.Kind.WARNING && werror && ACCEPT_NON_RECOVERABLE_LINTS.test(d)) || (d.getKind() == Diagnostic.Kind.ERROR && (fatalErrors || !d.isFlagSet(RECOVERABLE)))); } @@ -1195,12 +1195,20 @@ public class JavacProcessingEnvironment implements ProcessingEnvironment, Closea } void showDiagnostics(boolean showAll) { - deferredDiagnosticHandler.reportDeferredDiagnostics(showAll ? ACCEPT_ALL - : ACCEPT_NON_RECOVERABLE); + deferredDiagnosticHandler.reportDeferredDiagnostics( + ACCEPT_NON_RECOVERABLE_LINTS.and(showAll ? 
ACCEPT_ALL + : ACCEPT_NON_RECOVERABLE)); log.popDiagnosticHandler(deferredDiagnosticHandler); compiler.setDeferredDiagnosticHandler(null); } //where: + private final Predicate ACCEPT_NON_RECOVERABLE_LINTS = + d -> !Optional.of(d) + .filter(diag -> !diag.isFlagSet(SYNTAX)) + .map(JCDiagnostic::getLintCategory) + .map(lc -> lc.annotationSuppression || + lc == Lint.LintCategory.INCUBATING) + .orElse(false); private final Predicate ACCEPT_NON_RECOVERABLE = d -> d.getKind() != JCDiagnostic.Kind.ERROR || !d.isFlagSet(DiagnosticFlag.RECOVERABLE) || diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties index 915d7f8a8d8..58a5333ce4c 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties @@ -2521,10 +2521,12 @@ compiler.err.cant.attach.type.annotations=\ {3} # 0: file object, 1: symbol, 2: name +# lint: classfile compiler.warn.unknown.enum.constant=\ unknown enum constant {1}.{2} # 0: file object, 1: symbol, 2: name, 3: message segment +# lint: classfile compiler.warn.unknown.enum.constant.reason=\ unknown enum constant {1}.{2}\n\ reason: {3} diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/util/AbstractLog.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/util/AbstractLog.java index ce3e56f2f3f..b8b2a3af254 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/util/AbstractLog.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/util/AbstractLog.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -192,6 +192,16 @@ public abstract class AbstractLog { report(diags.warning(null, source, wrap(pos), warningKey)); } + /** Report a warning, unless suppressed by the -nowarn option or the + * maximum number of warnings has been reached. + * @param flag A flag to set on the diagnostic + * @param pos The source position at which to report the warning. + * @param warningKey The key for the localized warning message. + */ + public void warning(DiagnosticFlag flag, int pos, Warning warningKey) { + report(diags.warning(flag, source, wrap(pos), warningKey)); + } + /** Provide a non-fatal notification, unless suppressed by the -nowarn option. * @param noteKey The key for the localized notification message. */ diff --git a/src/jdk.crypto.mscapi/windows/native/libsunmscapi/security.cpp b/src/jdk.crypto.mscapi/windows/native/libsunmscapi/security.cpp index ff011dca889..5c84b929ef1 100644 --- a/src/jdk.crypto.mscapi/windows/native/libsunmscapi/security.cpp +++ b/src/jdk.crypto.mscapi/windows/native/libsunmscapi/security.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1375,7 +1375,7 @@ JNIEXPORT jobject JNICALL Java_sun_security_mscapi_CKeyPairGenerator_00024RSA_ge PROV_RSA_FULL, CRYPT_NEWKEYSET) == FALSE) { - ThrowException(env, KEY_EXCEPTION, GetLastError()); + ThrowExceptionWithMessageAndErrcode(env, KEY_EXCEPTION, "CryptAcquireContext failure", GetLastError()); __leave; } } @@ -1387,7 +1387,7 @@ JNIEXPORT jobject JNICALL Java_sun_security_mscapi_CKeyPairGenerator_00024RSA_ge dwFlags, &hKeyPair) == FALSE) { - ThrowException(env, KEY_EXCEPTION, GetLastError()); + ThrowExceptionWithMessageAndErrcode(env, KEY_EXCEPTION, "CryptGenKey failure", GetLastError()); __leave; } diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/DwarfParser.cpp b/src/jdk.hotspot.agent/linux/native/libsaproc/DwarfParser.cpp index 62dbc84f88c..cc03f3fc832 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/DwarfParser.cpp +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/DwarfParser.cpp @@ -31,73 +31,54 @@ #define CHECK_EXCEPTION if (env->ExceptionCheck()) { return; } static jfieldID p_dwarf_context_ID = 0; -static jint sa_RAX = -1; -static jint sa_RDX = -1; -static jint sa_RCX = -1; -static jint sa_RBX = -1; -static jint sa_RSI = -1; -static jint sa_RDI = -1; -static jint sa_RBP = -1; -static jint sa_RSP = -1; -static jint sa_R8 = -1; -static jint sa_R9 = -1; -static jint sa_R10 = -1; -static jint sa_R11 = -1; -static jint sa_R12 = -1; -static jint sa_R13 = -1; -static jint sa_R14 = -1; -static jint sa_R15 = -1; + +// DWARF_REG macro is used by DWARF_REGLIST. 
+#define DWARF_REG(reg, _) \ + static jint sa_##reg = -1; + +DWARF_REGLIST + +#undef DWARF_REG static jlong get_dwarf_context(JNIEnv *env, jobject obj) { return env->GetLongField(obj, p_dwarf_context_ID); } -#define SET_REG(env, reg, reg_cls) \ +/* + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser + * Method: init0 + * Signature: ()V + */ +extern "C" +JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_init0 + (JNIEnv *env, jclass this_cls) { + jclass cls = env->FindClass("sun/jvm/hotspot/debugger/linux/DwarfParser"); + CHECK_EXCEPTION + p_dwarf_context_ID = env->GetFieldID(cls, "p_dwarf_context", "J"); + CHECK_EXCEPTION + + jclass reg_cls = env->FindClass(THREAD_CONTEXT_CLASS); + CHECK_EXCEPTION + +// DWARF_REG macro is used by DWARF_REGLIST. +#define DWARF_REG(reg, _) \ jfieldID reg##_ID = env->GetStaticFieldID(reg_cls, #reg, "I"); \ CHECK_EXCEPTION \ sa_##reg = env->GetStaticIntField(reg_cls, reg##_ID); \ CHECK_EXCEPTION -/* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser - * Method: init0 - * Signature: ()V - */ -extern "C" -JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_init0 - (JNIEnv *env, jclass this_cls) { - jclass cls = env->FindClass("sun/jvm/hotspot/debugger/linux/amd64/DwarfParser"); - CHECK_EXCEPTION - p_dwarf_context_ID = env->GetFieldID(cls, "p_dwarf_context", "J"); - CHECK_EXCEPTION + DWARF_REGLIST - jclass reg_cls = env->FindClass("sun/jvm/hotspot/debugger/amd64/AMD64ThreadContext"); - CHECK_EXCEPTION - SET_REG(env, RAX, reg_cls); - SET_REG(env, RDX, reg_cls); - SET_REG(env, RCX, reg_cls); - SET_REG(env, RBX, reg_cls); - SET_REG(env, RSI, reg_cls); - SET_REG(env, RDI, reg_cls); - SET_REG(env, RBP, reg_cls); - SET_REG(env, RSP, reg_cls); - SET_REG(env, R8, reg_cls); - SET_REG(env, R9, reg_cls); - SET_REG(env, R10, reg_cls); - SET_REG(env, R11, reg_cls); - SET_REG(env, R12, reg_cls); - SET_REG(env, R13, reg_cls); - SET_REG(env, R14, reg_cls); - SET_REG(env, R15, reg_cls); +#undef 
DWARF_REG } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: createDwarfContext * Signature: (J)J */ extern "C" -JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_createDwarfContext +JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_createDwarfContext (JNIEnv *env, jclass this_cls, jlong lib) { DwarfParser *parser = new DwarfParser(reinterpret_cast(lib)); if (!parser->is_parseable()) { @@ -113,36 +94,36 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_cr } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: destroyDwarfContext * Signature: (J)V */ extern "C" -JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_destroyDwarfContext +JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_destroyDwarfContext (JNIEnv *env, jclass this_cls, jlong context) { DwarfParser *parser = reinterpret_cast(context); delete parser; } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: isIn0 * Signature: (J)Z */ extern "C" -JNIEXPORT jboolean JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_isIn0 +JNIEXPORT jboolean JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_isIn0 (JNIEnv *env, jobject this_obj, jlong pc) { DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); return static_cast(parser->is_in(pc)); } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: processDwarf0 * Signature: (J)V */ extern "C" -JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_processDwarf0 +JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_processDwarf0 (JNIEnv *env, jobject this_obj, jlong pc) { 
DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); if (!parser->process_dwarf(pc)) { @@ -155,67 +136,106 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_pro } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: getCFARegister * Signature: ()I */ extern "C" -JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_getCFARegister +JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_getCFARegister (JNIEnv *env, jobject this_obj) { DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); + switch (parser->get_cfa_register()) { - case RAX: return sa_RAX; - case RDX: return sa_RDX; - case RCX: return sa_RCX; - case RBX: return sa_RBX; - case RSI: return sa_RSI; - case RDI: return sa_RDI; - case RBP: return sa_RBP; - case RSP: return sa_RSP; - case R8: return sa_R8; - case R9: return sa_R9; - case R10: return sa_R10; - case R11: return sa_R11; - case R12: return sa_R12; - case R13: return sa_R13; - case R14: return sa_R14; - case R15: return sa_R15; +// DWARF_REG macro is used by DWARF_REGLIST. 
+#define DWARF_REG(reg, _) \ + case reg: return sa_##reg; + + DWARF_REGLIST + +#undef DWARF_REG + default: return -1; } } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: getCFAOffset * Signature: ()I */ extern "C" -JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_getCFAOffset +JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_getCFAOffset (JNIEnv *env, jobject this_obj) { DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); return parser->get_cfa_offset(); } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser + * Method: getOffsetFromCFA + * Signature: (I)I + */ +extern "C" +JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_getOffsetFromCFA + (JNIEnv *env, jobject this_obj, jint sareg) { + DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); + +// DWARF_REG macro is used by DWARF_REGLIST. +#define DWARF_REG(reg, dwreg) \ + if (sareg == sa_##reg) { \ + return parser->get_offset_from_cfa(static_cast(dwreg)); \ + } else + + DWARF_REGLIST + +#undef DWARF_REG + + return INT_MAX; +} + +/* + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser + * Method: getRARegister + * Signature: ()I + */ +extern "C" +JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_getRARegister + (JNIEnv *env, jobject this_obj) { + DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); + + switch (parser->get_ra_register()) { +// DWARF_REG macro is used by DWARF_REGLIST. 
+#define DWARF_REG(reg, _) \ + case reg: return sa_##reg; + + DWARF_REGLIST + +#undef DWARF_REG + + default: return -1; + } +} + +/* + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: getReturnAddressOffsetFromCFA * Signature: ()I */ extern "C" -JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_getReturnAddressOffsetFromCFA +JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_getReturnAddressOffsetFromCFA (JNIEnv *env, jobject this_obj) { DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); return parser->get_offset_from_cfa(RA); } /* - * Class: sun_jvm_hotspot_debugger_linux_amd64_DwarfParser + * Class: sun_jvm_hotspot_debugger_linux_DwarfParser * Method: getBasePointerOffsetFromCFA * Signature: ()I */ extern "C" -JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_amd64_DwarfParser_getBasePointerOffsetFromCFA +JNIEXPORT jint JNICALL Java_sun_jvm_hotspot_debugger_linux_DwarfParser_getBasePointerOffsetFromCFA (JNIEnv *env, jobject this_obj) { DwarfParser *parser = reinterpret_cast(get_dwarf_context(env, this_obj)); - return parser->get_offset_from_cfa(RBP); + return parser->get_offset_from_cfa(BP); } diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp b/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp index caf948019af..214e2f21ac6 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2019, 2021, NTT DATA. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, NTT DATA. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -289,6 +289,13 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_at snprintf(msg, sizeof(msg), "Can't attach to the process: %s", err_buf); THROW_NEW_DEBUGGER_EXCEPTION(msg); } + +#ifdef __aarch64__ + if (pac_enabled(ph)) { + printf("WARNING: PAC is enabled. Stack traces might be incomplete.\n"); + } +#endif + env->SetLongField(this_obj, p_ps_prochandle_ID, (jlong)(intptr_t)ph); fillThreadsAndLoadObjects(env, this_obj, ph); } @@ -313,6 +320,13 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_at if ( (ph = Pgrab_core(execName_cstr, coreName_cstr)) == NULL) { THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the core file. For more information, export LIBSAPROC_DEBUG=1 and try again."); } + +#ifdef __aarch64__ + if (pac_enabled(ph)) { + printf("WARNING: PAC is enabled. Stack traces might be incomplete.\n"); + } +#endif + env->SetLongField(this_obj, p_ps_prochandle_ID, (jlong)(intptr_t)ph); fillThreadsAndLoadObjects(env, this_obj, ph); } diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.cpp b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.cpp index 459e3cc57e9..28eb92a285f 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.cpp +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.cpp @@ -217,6 +217,20 @@ void DwarfParser::parse_dwarf_instructions(uintptr_t begin, uintptr_t pc, const _state.offset_from_cfa[reg] = _initial_state.offset_from_cfa[reg]; break; } +#ifdef __aarch64__ + // SA hasn't yet supported Pointer Authetication Code (PAC), so following + // instructions would be ignored with warning message. 
+ // https://github.com/ARM-software/abi-aa/blob/2025Q4/aadwarf64/aadwarf64.rst + case 0x2d: // DW_CFA_AARCH64_negate_ra_state + print_debug("DWARF: DW_CFA_AARCH64_negate_ra_state is unimplemented.\n", op); + break; + case 0x2c: // DW_CFA_AARCH64_negate_ra_state_with_pc + print_debug("DWARF: DW_CFA_AARCH64_negate_ra_state_with_pc is unimplemented.\n", op); + break; + case 0x2b: // DW_CFA_AARCH64_set_ra_state + print_debug("DWARF: DW_CFA_AARCH64_set_ra_state is unimplemented.\n", op); + break; +#endif default: print_debug("DWARF: Unknown opcode: 0x%x\n", op); return; diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.hpp b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.hpp index 0a38c9a0f2e..2bfdba65a78 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.hpp +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf.hpp @@ -30,30 +30,21 @@ #include "libproc_impl.h" -/* - * from System V Application Binary Interface - * AMD64 Architecture Processor Supplement - * Figure 3.38: DWARF Register Number Mapping - * https://software.intel.com/sites/default/files/article/402129/mpx-linux64-abi.pdf - */ +#ifdef __x86_64__ +#include "dwarf_regs_amd64.h" +#elif defined(__aarch64__) +#include "dwarf_regs_aarch64.h" +#endif + enum DWARF_Register { - RAX, - RDX, - RCX, - RBX, - RSI, - RDI, - RBP, - RSP, - R8, - R9, - R10, - R11, - R12, - R13, - R14, - R15, - RA, +// DWARF_REG macro is used by DWARF_REGLIST and DWARF_PSEUDO_REGLIST. 
+#define DWARF_REG(reg, no) \ + reg = no, + + DWARF_REGLIST + DWARF_PSEUDO_REGLIST + +#undef DWARF_REG MAX_VALUE }; @@ -94,6 +85,7 @@ class DwarfParser { bool process_dwarf(const uintptr_t pc); enum DWARF_Register get_cfa_register() { return _state.cfa_reg; } int get_cfa_offset() { return _state.cfa_offset; } + enum DWARF_Register get_ra_register() { return _state.return_address_reg; } int get_offset_from_cfa(enum DWARF_Register reg) { return _state.offset_from_cfa[reg]; } bool is_in(long pc) { diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf_regs_aarch64.h b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf_regs_aarch64.h new file mode 100644 index 00000000000..5a95e9405e1 --- /dev/null +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf_regs_aarch64.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef DWARF_REGS_AARCH64_H +#define DWARF_REGS_AARCH64_H + +#define THREAD_CONTEXT_CLASS "sun/jvm/hotspot/debugger/aarch64/AARCH64ThreadContext" + +/* + * from DWARF for the Arm (R) 64-bit Architecture (AArch64) + * https://github.com/ARM-software/abi-aa/blob/2025Q4/aadwarf64/aadwarf64.rst + * 4.1 DWARF register names + */ +#define DWARF_REGLIST \ + DWARF_REG(R0, 0) \ + DWARF_REG(R1, 1) \ + DWARF_REG(R2, 2) \ + DWARF_REG(R3, 3) \ + DWARF_REG(R4, 4) \ + DWARF_REG(R5, 5) \ + DWARF_REG(R6, 6) \ + DWARF_REG(R7, 7) \ + DWARF_REG(R8, 8) \ + DWARF_REG(R9, 9) \ + DWARF_REG(R10, 10) \ + DWARF_REG(R11, 11) \ + DWARF_REG(R12, 12) \ + DWARF_REG(R13, 13) \ + DWARF_REG(R14, 14) \ + DWARF_REG(R15, 15) \ + DWARF_REG(R16, 16) \ + DWARF_REG(R17, 17) \ + DWARF_REG(R18, 18) \ + DWARF_REG(R19, 19) \ + DWARF_REG(R20, 20) \ + DWARF_REG(R21, 21) \ + DWARF_REG(R22, 22) \ + DWARF_REG(R23, 23) \ + DWARF_REG(R24, 24) \ + DWARF_REG(R25, 25) \ + DWARF_REG(R26, 26) \ + DWARF_REG(R27, 27) \ + DWARF_REG(R28, 28) \ + DWARF_REG(FP, 29) \ + DWARF_REG(LR, 30) \ + DWARF_REG(SP, 31) \ + DWARF_REG(PC, 32) + +// RA_SIGN_STATE might be needed in future to handle PAC. +#define DWARF_PSEUDO_REGLIST + +/* Aliases */ +#define BP FP +#define RA LR + +#endif diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf_regs_amd64.h b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf_regs_amd64.h new file mode 100644 index 00000000000..8226bc0864c --- /dev/null +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/dwarf_regs_amd64.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef DWARF_REGS_AMD64_H +#define DWARF_REGS_AMD64_H + +#define THREAD_CONTEXT_CLASS "sun/jvm/hotspot/debugger/amd64/AMD64ThreadContext" + +/* + * from System V Application Binary Interface + * AMD64 Architecture Processor Supplement + * https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf + * Figure 3.36: DWARF Register Number Mapping + */ +#define DWARF_REGLIST \ + DWARF_REG(RAX, 0) \ + DWARF_REG(RDX, 1) \ + DWARF_REG(RCX, 2) \ + DWARF_REG(RBX, 3) \ + DWARF_REG(RSI, 4) \ + DWARF_REG(RDI, 5) \ + DWARF_REG(RBP, 6) \ + DWARF_REG(RSP, 7) \ + DWARF_REG(R8, 8) \ + DWARF_REG(R9, 9) \ + DWARF_REG(R10, 10) \ + DWARF_REG(R11, 11) \ + DWARF_REG(R12, 12) \ + DWARF_REG(R13, 13) \ + DWARF_REG(R14, 14) \ + DWARF_REG(R15, 15) + +#define DWARF_PSEUDO_REGLIST \ + DWARF_REG(RA, 16) + +/* Aliases */ +#define BP RBP + +#endif diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h index a69496e77a4..c584131e285 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,6 +119,10 @@ struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj); void throw_new_debugger_exception(JNIEnv* env, const char* errMsg); +#ifdef __aarch64__ +bool pac_enabled(struct ps_prochandle* ph); +#endif + #ifdef __cplusplus } #endif diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.c b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.c index e2681be73fe..815902045cf 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.c +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.c @@ -182,13 +182,14 @@ static bool fill_addr_info(lib_info* lib) { return false; } + long page_size = sysconf(_SC_PAGE_SIZE); lib->end = (uintptr_t)-1L; lib->exec_start = (uintptr_t)-1L; lib->exec_end = (uintptr_t)-1L; for (ph = phbuf, cnt = 0; cnt < ehdr.e_phnum; cnt++, ph++) { if (ph->p_type == PT_LOAD) { uintptr_t aligned_start = lib->base + align_down(ph->p_vaddr, ph->p_align); - uintptr_t aligned_end = aligned_start + align_up(ph->p_memsz, ph->p_align); + uintptr_t aligned_end = aligned_start + align_up(ph->p_memsz, page_size); if ((lib->end == (uintptr_t)-1L) || (lib->end < aligned_end)) { lib->end = aligned_end; } @@ -477,6 +478,12 @@ struct lib_info *find_lib_by_address(struct ps_prochandle* ph, uintptr_t pc) { return NULL; } +#ifdef __aarch64__ +bool pac_enabled(struct ps_prochandle* ph) { + return ph->pac_enabled; +} +#endif + //-------------------------------------------------------------------------- // proc service functions diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.h b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.h index 62b1b4d0d6b..d5aa74e73ad 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.h +++ 
b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc_impl.h @@ -115,6 +115,10 @@ struct ps_prochandle { int num_threads; thread_info* threads; // head of thread list struct core_data* core; // data only used for core dumps, NULL for process +#ifdef __aarch64__ + // true if the HWCAP_PACA variant of Pointer Authentication Code (PAC) is enabled. + bool pac_enabled; +#endif }; #ifdef __cplusplus diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_core.c b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_core.c index 6298f569aaf..c500360f39d 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_core.c +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_core.c @@ -39,6 +39,12 @@ #include "proc_service.h" #include "salibelf.h" +// HWCAP_PACA was introduced in glibc 2.30 +// https://sourceware.org/git/?p=glibc.git;a=commit;h=a2e57f89a35e6056c9488428e68c4889e114ef71 +#if defined(__aarch64__) && !defined(HWCAP_PACA) +#define HWCAP_PACA (1 << 30) +#endif + // This file has the libproc implementation to read core files. // For live processes, refer to ps_proc.c. 
Portions of this is adapted // /modelled after Solaris libproc.so (in particular Pcore.c) @@ -290,6 +296,10 @@ static bool core_handle_note(struct ps_prochandle* ph, ELF_PHDR* note_phdr) { break; } else if (auxv->a_type == AT_SYSINFO_EHDR) { ph->core->vdso_addr = auxv->a_un.a_val; +#ifdef __aarch64__ + } else if (auxv->a_type == AT_HWCAP) { + ph->pac_enabled = auxv->a_un.a_val & HWCAP_PACA; +#endif } auxv++; } @@ -610,23 +620,38 @@ static uintptr_t calc_prelinked_load_address(struct ps_prochandle* ph, int lib_f return load_addr; } +static int handle_vdso_internal(struct ps_prochandle* ph, char* vdso_name, char* lib_name, size_t lib_name_len) { + int lib_fd; + struct utsname uts; + uname(&uts); + + char *vdso_path = (char*)malloc(lib_name_len); + snprintf(vdso_path, lib_name_len, "/lib/modules/%s/vdso/%s", uts.release, vdso_name); + print_debug("Try to open vDSO: %s\n", vdso_path); + lib_fd = pathmap_open(vdso_path); + if (lib_fd != -1) { + print_debug("replace vDSO: %s -> %s\n", lib_name, vdso_path); + strncpy(lib_name, vdso_path, lib_name_len); + } + + free(vdso_path); + return lib_fd; +} + // Check for vDSO binary in kernel directory (/lib/modules//vdso), // rewrite the given lib_name string if found. // Otherwise copy vDSO memory in coredump to temporal file generated by tmpfile(). // Returns FD for vDSO (should be closed by caller), or -1 on error. static int handle_vdso(struct ps_prochandle* ph, char* lib_name, size_t lib_name_len) { int lib_fd; - struct utsname uts; - uname(&uts); // Check vDSO binary first (for referring debuginfo if possible). 
- char *vdso_path = (char*)malloc(lib_name_len); - snprintf(vdso_path, lib_name_len, "/lib/modules/%s/vdso/vdso64.so", uts.release); - lib_fd = pathmap_open(vdso_path); - if (lib_fd != -1) { - print_debug("replace vDSO: %s -> %s\n", lib_name, vdso_path); - strncpy(lib_name, vdso_path, lib_name_len); - } else { + lib_fd = handle_vdso_internal(ph, "vdso64.so", lib_name, lib_name_len); + if (lib_fd == -1) { + // Try again with vdso.so + lib_fd = handle_vdso_internal(ph, "vdso.so", lib_name, lib_name_len); + } + if (lib_fd == -1) { // Copy vDSO memory segment from core to temporal memory // if vDSO binary is not available. FILE* tmpf = tmpfile(); @@ -644,7 +669,6 @@ static int handle_vdso(struct ps_prochandle* ph, char* lib_name, size_t lib_name } } - free(vdso_path); return lib_fd; } diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c index fdaa30c3f5d..9cbde7319f0 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,17 @@ #include #include "libproc_impl.h" +#ifdef __aarch64__ +#include + +// HWCAP_PACA was introduced in glibc 2.30 +// https://sourceware.org/git/?p=glibc.git;a=commit;h=a2e57f89a35e6056c9488428e68c4889e114ef71 +#ifndef HWCAP_PACA +#define HWCAP_PACA (1 << 30) +#endif + +#endif + #if defined(x86_64) && !defined(amd64) #define amd64 1 #endif @@ -460,6 +471,10 @@ Pgrab(pid_t pid, char* err_buf, size_t err_buf_len) { return NULL; } +#ifdef __aarch64__ + ph->pac_enabled = HWCAP_PACA & getauxval(AT_HWCAP); +#endif + // initialize ps_prochandle ph->pid = pid; if (add_thread_info(ph, ph->pid) == NULL) { diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/symtab.c b/src/jdk.hotspot.agent/linux/native/libsaproc/symtab.c index c8f3fb2ed4c..8f8ce28be1e 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/symtab.c +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/symtab.c @@ -417,10 +417,14 @@ static struct symtab* build_symtab_internal(int fd, const char *filename, bool t uintptr_t sym_value; char *sym_name = symtab->strs + syms->st_name; - // skip non-object and non-function symbols + // skip non-object and non-function symbols, but STT_NOTYPE is allowed for + // signal trampoline. 
int st_type = ELF_ST_TYPE(syms->st_info); - if ( st_type != STT_FUNC && st_type != STT_OBJECT) + if (st_type != STT_FUNC && + st_type != STT_OBJECT && + st_type != STT_NOTYPE) { continue; + } // skip empty strings and undefined symbols if (*sym_name == '\0' || syms->st_shndx == SHN_UNDEF) continue; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/cdbg/CFrame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/cdbg/CFrame.java index bc366ef02b5..86f9e990af8 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/cdbg/CFrame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/cdbg/CFrame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ public interface CFrame { public CFrame sender(ThreadProxy th); /** Find sender frame with given FP and PC */ - public default CFrame sender(ThreadProxy th, Address sp, Address fp, Address pc) { + public default CFrame sender(ThreadProxy th, Address senderSP, Address senderFP, Address senderPC) { return sender(th); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/DwarfCFrame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/DwarfCFrame.java new file mode 100644 index 00000000000..7baa399ea75 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/DwarfCFrame.java @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.debugger.linux; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.debugger.ThreadProxy; +import sun.jvm.hotspot.debugger.UnalignedAddressException; +import sun.jvm.hotspot.debugger.UnmappedAddressException; +import sun.jvm.hotspot.debugger.cdbg.CFrame; +import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol; +import sun.jvm.hotspot.debugger.cdbg.basic.BasicCFrame; +import sun.jvm.hotspot.runtime.VM; + +public class DwarfCFrame extends BasicCFrame { + + private Address sp; + private Address fp; + private Address pc; + private Address cfa; + private LinuxDebugger linuxDbg; + private DwarfParser dwarf; + private boolean use1ByteBeforeToLookup; + + /** + * @return DwarfParser instance for the PC, null if native library relates to the pc not found. + * @throws DebuggerException if DWARF processing is failed. + * For example: pc is not covered in this DWARF, Common Information Entry (CIE) has + * language personality routine and/or Language Data Area (LSDA). 
+ */ + protected static DwarfParser createDwarfParser(LinuxDebugger linuxDbg, Address pc) { + Address libptr = linuxDbg.findLibPtrByAddress(pc); + if (libptr != null) { + DwarfParser dwarf = new DwarfParser(libptr); + dwarf.processDwarf(pc); + return dwarf; + } + return null; + } + + protected DwarfCFrame(LinuxDebugger linuxDbg, Address sp, Address fp, Address cfa, Address pc, DwarfParser dwarf) { + this(linuxDbg, sp, fp, cfa, pc, dwarf, false); + } + + protected DwarfCFrame(LinuxDebugger linuxDbg, Address sp, Address fp, Address cfa, Address pc, DwarfParser dwarf, boolean use1ByteBeforeToLookup) { + super(linuxDbg.getCDebugger()); + this.sp = sp; + this.fp = fp; + this.cfa = cfa; + this.pc = pc; + this.linuxDbg = linuxDbg; + this.dwarf = dwarf; + this.use1ByteBeforeToLookup = use1ByteBeforeToLookup; + } + + public Address sp() { + return sp; + } + + public Address fp() { + return fp; + } + + public Address pc() { + return pc; + } + + public LinuxDebugger linuxDbg() { + return linuxDbg; + } + + public DwarfParser dwarf() { + return dwarf; + } + + // override base class impl to avoid ELF parsing + @Override + public ClosestSymbol closestSymbolToPC() { + Address symAddr = use1ByteBeforeToLookup ? pc.addOffsetTo(-1) : pc; + var sym = linuxDbg.lookup(linuxDbg.getAddressValue(symAddr)); + + // Returns a special symbol if the address is signal trampoline, + // otherwise returns closest symbol generated by LinuxDebugger. + return linuxDbg.isSignalTrampoline(symAddr) + ? new ClosestSymbol(sym.getName() + " ", 0) + : sym; + } + + @Override + public Address localVariableBase() { + return (dwarf != null && dwarf.isBPOffsetAvailable()) + ? cfa.addOffsetTo(dwarf.getBasePointerOffsetFromCFA()) + : fp; + } + + protected boolean isValidFrame(Address senderCFA, Address senderFP) { + // Both CFA and FP must not be null. + if (senderCFA == null && senderFP == null) { + return false; + } + + // FP must not be null if CFA is null - it happens between Java frame and Native frame. 
+ // We cannot validate FP value because it might be used as GPR. Thus returns true + // if FP is not null. + if (senderCFA == null && senderFP != null) { + return true; + } + + // senderCFA must be greater than current CFA. + if (senderCFA != null && senderCFA.greaterThanOrEqual(cfa)) { + return true; + } + + // Otherwise, the frame is not valid. + return false; + } + + protected Address getSenderPC(Address senderPC) { + if (senderPC != null) { + return senderPC; + } + + try { + return dwarf == null + ? fp.getAddressAt(VM.getVM().getAddressSize()) // Current frame is Java + : cfa.getAddressAt(dwarf.getReturnAddressOffsetFromCFA()); // current frame is Native + } catch (UnmappedAddressException | UnalignedAddressException _) { + // Sender PC is invalid - maybe bottom of stack + return null; + } + } + + protected Address getSenderSP(Address senderSP) { + if (senderSP != null) { + return senderSP; + } else if (dwarf == null) { + // Current frame is Java - skip saved BP and RA + return fp.addOffsetTo(2 * VM.getVM().getAddressSize()); + } else { + // Current frame is Native + // CFA points SP at the call site in the previous frame. + // See 6.4 Call Frame Information in DWARF Debugging Information Format + // https://dwarfstd.org/dwarf4std.html + return cfa; + } + } + + protected Address getSenderFP(Address senderFP) { + if (senderFP != null) { + return senderFP; + } else if (dwarf == null) { // Current frame is Java + return fp.getAddressAt(0); + } else { // Current frame is Native + return dwarf.isBPOffsetAvailable() + ? 
cfa.getAddressAt(dwarf.getBasePointerOffsetFromCFA()) + : fp; + } + } + + @Override + public CFrame sender(ThreadProxy th) { + return sender(th, null, null, null); + } + +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/DwarfParser.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/DwarfParser.java similarity index 95% rename from src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/DwarfParser.java rename to src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/DwarfParser.java index 53351c918d3..3e8099cf75b 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/DwarfParser.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/DwarfParser.java @@ -23,7 +23,7 @@ * */ -package sun.jvm.hotspot.debugger.linux.amd64; +package sun.jvm.hotspot.debugger.linux; import java.lang.ref.Cleaner; import sun.jvm.hotspot.debugger.Address; @@ -75,6 +75,8 @@ public class DwarfParser { public native int getCFARegister(); public native int getCFAOffset(); + public native int getOffsetFromCFA(int sareg); + public native int getRARegister(); public native int getReturnAddressOffsetFromCFA(); public native int getBasePointerOffsetFromCFA(); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java index 15f6615421c..57ba419aa9d 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java @@ -91,13 +91,7 @@ class LinuxCDebugger implements CDebugger { return new LinuxPPC64CFrame(dbg, sp, pc, LinuxDebuggerLocal.getAddressSize()); } else if (cpu.equals("aarch64")) { AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext(); - Address sp = 
context.getRegisterAsAddress(AARCH64ThreadContext.SP); - if (sp == null) return null; - Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP); - if (fp == null) return null; - Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC); - if (pc == null) return null; - return new LinuxAARCH64CFrame(dbg, sp, fp, pc); + return LinuxAARCH64CFrame.getTopFrame(dbg, context); } else if (cpu.equals("riscv64")) { RISCV64ThreadContext context = (RISCV64ThreadContext) thread.getContext(); Address sp = context.getRegisterAsAddress(RISCV64ThreadContext.SP); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java index a53b8a0a282..c09af9881dd 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java @@ -33,12 +33,17 @@ import sun.jvm.hotspot.debugger.cdbg.*; by the architecture-specific subpackages. */ public interface LinuxDebugger extends JVMDebugger { - // SIGHANDLER_NAMES holds the name of signal handler. - public static final List SIGHANDLER_NAMES = List.of( + // SIGTRAMP_NAMES holds the name of signal trampoline. 
+ public static final List SIGTRAMP_NAMES = List.of( // For AMD64 // - sysdeps/unix/sysv/linux/x86_64/libc_sigaction.c in glibc // - gdb/amd64-linux-tdep.c in GDB - "__restore_rt" + "__restore_rt", + + // For AArch64 + // - arch/arm64/kernel/vdso/vdso.lds.S in Linux kernel + "__kernel_rt_sigreturn", + "VDSO_sigtramp" ); public String addressValueToString(long address) throws DebuggerException; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java index 9a75511e44d..856981bb73c 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java @@ -133,7 +133,7 @@ public class LinuxDebuggerLocal extends DebuggerBase implements LinuxDebugger { @Override public boolean isSignalTrampoline(Address pc) { var sym = lookup(getAddressValue(pc)); - return sym == null ? false : SIGHANDLER_NAMES.contains(sym.getName()); + return sym == null ? false : SIGTRAMP_NAMES.contains(sym.getName()); } // Note on Linux threads are really processes. When target process is diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64CFrame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64CFrame.java index 5f76e6308e9..c55aca2155c 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64CFrame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64CFrame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, Red Hat Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -25,73 +25,109 @@ package sun.jvm.hotspot.debugger.linux.aarch64; +import java.util.function.Function; + import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.debugger.aarch64.*; import sun.jvm.hotspot.debugger.linux.*; import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.cdbg.basic.*; import sun.jvm.hotspot.code.*; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.runtime.aarch64.*; -public final class LinuxAARCH64CFrame extends BasicCFrame { - public LinuxAARCH64CFrame(LinuxDebugger dbg, Address sp, Address fp, Address pc) { - super(dbg.getCDebugger()); - this.sp = sp; - this.fp = fp; - this.pc = pc; - this.dbg = dbg; +public final class LinuxAARCH64CFrame extends DwarfCFrame { + + private Address lr; + + private static LinuxAARCH64CFrame getFrameFromReg(LinuxDebugger linuxDbg, Function getreg) { + Address pc = getreg.apply(AARCH64ThreadContext.PC); + Address sp = getreg.apply(AARCH64ThreadContext.SP); + Address fp = getreg.apply(AARCH64ThreadContext.FP); + Address lr = getreg.apply(AARCH64ThreadContext.LR); + Address cfa = null; + DwarfParser dwarf = createDwarfParser(linuxDbg, pc); + + if (dwarf != null) { // Native frame + cfa = getreg.apply(dwarf.getCFARegister()) + .addOffsetTo(dwarf.getCFAOffset()); + } + + return (fp == null && cfa == null) + ? null + : new LinuxAARCH64CFrame(linuxDbg, sp, fp, cfa, pc, lr, dwarf); } - // override base class impl to avoid ELF parsing - public ClosestSymbol closestSymbolToPC() { - // try native lookup in debugger. 
- return dbg.lookup(dbg.getAddressValue(pc())); + public static LinuxAARCH64CFrame getTopFrame(LinuxDebugger linuxDbg, ThreadContext context) { + return getFrameFromReg(linuxDbg, context::getRegisterAsAddress); } - public Address pc() { - return pc; + private LinuxAARCH64CFrame(LinuxDebugger linuxDbg, Address sp, Address fp, Address cfa, Address pc, Address lr, DwarfParser dwarf) { + this(linuxDbg, sp, fp, cfa, pc, lr, dwarf, false); } - public Address localVariableBase() { - return fp; + private LinuxAARCH64CFrame(LinuxDebugger linuxDbg, Address sp, Address fp, Address cfa, Address pc, DwarfParser dwarf) { + this(linuxDbg, sp, fp, cfa, pc, null, dwarf, false); } - @Override - public CFrame sender(ThreadProxy thread) { - return sender(thread, null, null, null); + private LinuxAARCH64CFrame(LinuxDebugger linuxDbg, Address sp, Address fp, Address cfa, Address pc, DwarfParser dwarf, boolean use1ByteBeforeToLookup) { + this(linuxDbg, sp, fp, cfa, pc, null, dwarf, use1ByteBeforeToLookup); } - @Override - public CFrame sender(ThreadProxy thread, Address nextSP, Address nextFP, Address nextPC) { - // Check fp - // Skip if both nextFP and nextPC are given - do not need to load from fp. - if (nextFP == null && nextPC == null) { - if (fp == null) { - return null; + private LinuxAARCH64CFrame(LinuxDebugger linuxDbg, Address sp, Address fp, Address cfa, Address pc, Address lr, DwarfParser dwarf, boolean use1ByteBeforeToLookup) { + super(linuxDbg, sp, fp, cfa, pc, dwarf, use1ByteBeforeToLookup); + + if (dwarf != null) { + // Prioritize to use RA from DWARF instead of LR + var senderPCFromDwarf = getSenderPC(null); + if (senderPCFromDwarf != null) { + lr = senderPCFromDwarf; + } else if (lr != null) { + // We should set passed lr to LR of this frame, + // but throws DebuggerException if lr is not used for RA. 
+ var raReg = dwarf.getRARegister(); + if (raReg != AARCH64ThreadContext.LR) { + throw new DebuggerException("Unexpected RA register: " + raReg); + } } + } - // Check alignment of fp - if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) { + this.lr = lr; + } + + private Address getSenderCFA(DwarfParser senderDwarf, Address senderSP, Address senderFP) { + if (senderDwarf == null) { // Sender frame is Java + // CFA is not available on Java frame + return null; + } + + // Sender frame is Native + int senderCFAReg = senderDwarf.getCFARegister(); + return switch(senderCFAReg){ + case AARCH64ThreadContext.FP -> senderFP.addOffsetTo(senderDwarf.getCFAOffset()); + case AARCH64ThreadContext.SP -> senderSP.addOffsetTo(senderDwarf.getCFAOffset()); + default -> throw new DebuggerException("Unsupported CFA register: " + senderCFAReg); + }; + } + + @Override + public CFrame sender(ThreadProxy thread, Address senderSP, Address senderFP, Address senderPC) { + if (linuxDbg().isSignalTrampoline(pc())) { + // SP points signal context + // https://github.com/torvalds/linux/blob/v6.17/arch/arm64/kernel/signal.c#L1357 + return getFrameFromReg(linuxDbg(), r -> LinuxAARCH64ThreadContext.getRegFromSignalTrampoline(sp(), r.intValue())); + } + + if (senderPC == null) { + // Use getSenderPC() if current frame is Java because we cannot rely on lr in this case. + senderPC = dwarf() == null ? 
getSenderPC(null) : lr; + if (senderPC == null) { return null; } } - if (nextFP == null) { - nextFP = fp.getAddressAt(0 * ADDRESS_SIZE); - } - if (nextFP == null) { - return null; - } + senderFP = getSenderFP(senderFP); - if (nextPC == null) { - nextPC = fp.getAddressAt(1 * ADDRESS_SIZE); - } - if (nextPC == null) { - return null; - } - - if (nextSP == null) { + if (senderSP == null) { CodeCache cc = VM.getVM().getCodeCache(); CodeBlob currentBlob = cc.findBlobUnsafe(pc()); @@ -99,29 +135,62 @@ public final class LinuxAARCH64CFrame extends BasicCFrame { if (currentBlob != null && (currentBlob.isContinuationStub() || currentBlob.isNativeMethod())) { // Use FP since it should always be valid for these cases. // TODO: These should be walked as Frames not CFrames. - nextSP = fp.addOffsetTo(2 * ADDRESS_SIZE); + senderSP = fp().addOffsetTo(2 * VM.getVM().getAddressSize()); } else { - CodeBlob codeBlob = cc.findBlobUnsafe(nextPC); + CodeBlob codeBlob = cc.findBlobUnsafe(senderPC); boolean useCodeBlob = codeBlob != null && codeBlob.getFrameSize() > 0; - nextSP = useCodeBlob ? nextFP.addOffsetTo((2 * ADDRESS_SIZE) - codeBlob.getFrameSize()) : nextFP; + senderSP = useCodeBlob ? senderFP.addOffsetTo((2 * VM.getVM().getAddressSize()) - codeBlob.getFrameSize()) : getSenderSP(null); } } - if (nextSP == null) { + if (senderSP == null) { return null; } - return new LinuxAARCH64CFrame(dbg, nextSP, nextFP, nextPC); + DwarfParser senderDwarf = null; + boolean fallback = false; + try { + senderDwarf = createDwarfParser(linuxDbg(), senderPC); + } catch (DebuggerException _) { + // Try again with PC-1 in case PC is just outside function bounds, + // due to function ending with a `call` instruction. + try { + senderDwarf = createDwarfParser(linuxDbg(), senderPC.addOffsetTo(-1)); + fallback = true; + } catch (DebuggerException _) { + if (linuxDbg().isSignalTrampoline(senderPC)) { + // We can use the caller frame if it is a signal trampoline. 
+ // DWARF processing might fail because vdso.so .eh_frame is not required on aarch64. + return new LinuxAARCH64CFrame(linuxDbg(), senderSP, senderFP, null, senderPC, senderDwarf); + } + + // DWARF processing should succeed when the frame is native + // but it might fail if Common Information Entry (CIE) has language + // personality routine and/or Language Specific Data Area (LSDA). + return null; + } + } + + try { + Address senderCFA = getSenderCFA(senderDwarf, senderSP, senderFP); + return isValidFrame(senderCFA, senderFP) + ? new LinuxAARCH64CFrame(linuxDbg(), senderSP, senderFP, senderCFA, senderPC, senderDwarf, fallback) + : null; + } catch (DebuggerException e) { + if (linuxDbg().isSignalTrampoline(senderPC)) { + // We can use the caller frame if it is a signal trampoline. + // getSenderCFA() might fail because DwarfParser cannot find out CFA register. + return new LinuxAARCH64CFrame(linuxDbg(), senderSP, senderFP, null, senderPC, senderDwarf, fallback); + } + + // Rethrow the original exception if getSenderCFA() failed + // and the caller is not signal trampoline. + throw e; + } } @Override public Frame toFrame() { - return new AARCH64Frame(sp, fp, pc); + return new AARCH64Frame(sp(), fp(), pc()); } - // package/class internals only - private static final int ADDRESS_SIZE = 8; - private Address pc; - private Address sp; - private Address fp; - private LinuxDebugger dbg; } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64ThreadContext.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64ThreadContext.java index 77003168671..422ca001624 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64ThreadContext.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64ThreadContext.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, Red Hat Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,6 +28,7 @@ package sun.jvm.hotspot.debugger.linux.aarch64; import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.debugger.aarch64.*; import sun.jvm.hotspot.debugger.linux.*; +import sun.jvm.hotspot.runtime.*; public class LinuxAARCH64ThreadContext extends AARCH64ThreadContext { private LinuxDebugger debugger; @@ -44,4 +45,24 @@ public class LinuxAARCH64ThreadContext extends AARCH64ThreadContext { public Address getRegisterAsAddress(int index) { return debugger.newAddress(getRegister(index)); } + + public static Address getRegFromSignalTrampoline(Address sp, int index) { + // ucontext_t locates at 2nd element of rt_sigframe. + // See definition of rt_sigframe in arch/arm/kernel/signal.h + // in Linux Kernel. + Address addrUContext = sp.addOffsetTo(128); // sizeof(siginfo_t) = 128 + Address addrUCMContext = addrUContext.addOffsetTo(176); // offsetof(ucontext_t, uc_mcontext) = 176 + + Address ptrCallerSP = addrUCMContext.addOffsetTo(256); // offsetof(uc_mcontext, sp) = 256 + Address ptrCallerPC = addrUCMContext.addOffsetTo(264); // offsetof(uc_mcontext, pc) = 264 + Address ptrCallerRegs = addrUCMContext.addOffsetTo(8); // offsetof(uc_mcontext, regs) = 8 + + return switch(index) { + case AARCH64ThreadContext.FP -> ptrCallerRegs.getAddressAt(AARCH64ThreadContext.FP * VM.getVM().getAddressSize()); + case AARCH64ThreadContext.LR -> ptrCallerRegs.getAddressAt(AARCH64ThreadContext.LR * VM.getVM().getAddressSize()); + case AARCH64ThreadContext.SP -> ptrCallerSP.getAddressAt(0); + case AARCH64ThreadContext.PC -> ptrCallerPC.getAddressAt(0); + default -> throw new IllegalArgumentException("Unsupported register index: " + index); + }; + } } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java 
b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java index 4d3d9d5998d..e58e2facdd7 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java @@ -29,181 +29,82 @@ import java.util.function.Function; import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.debugger.amd64.*; import sun.jvm.hotspot.debugger.linux.*; -import sun.jvm.hotspot.debugger.linux.amd64.*; import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.cdbg.basic.*; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.runtime.amd64.*; -public final class LinuxAMD64CFrame extends BasicCFrame { +public final class LinuxAMD64CFrame extends DwarfCFrame { - private static LinuxAMD64CFrame getFrameFromReg(LinuxDebugger dbg, Function getreg) { + private static LinuxAMD64CFrame getFrameFromReg(LinuxDebugger linuxDbg, Function getreg) { Address rip = getreg.apply(AMD64ThreadContext.RIP); Address rsp = getreg.apply(AMD64ThreadContext.RSP); Address rbp = getreg.apply(AMD64ThreadContext.RBP); - Address libptr = dbg.findLibPtrByAddress(rip); Address cfa = null; - DwarfParser dwarf = null; - - if (libptr != null) { // Native frame - dwarf = new DwarfParser(libptr); - try { - dwarf.processDwarf(rip); - } catch (DebuggerException e) { - // DWARF processing should succeed when the frame is native - // but it might fail if Common Information Entry (CIE) has language - // personality routine and/or Language Specific Data Area (LSDA). - return new LinuxAMD64CFrame(dbg, rsp, rbp, cfa, rip, dwarf, true); - } + DwarfParser dwarf = createDwarfParser(linuxDbg, rip); + if (dwarf != null) { // Native frame cfa = getreg.apply(dwarf.getCFARegister()) .addOffsetTo(dwarf.getCFAOffset()); } return (rbp == null && cfa == null) ? 
null - : new LinuxAMD64CFrame(dbg, rsp, rbp, cfa, rip, dwarf); + : new LinuxAMD64CFrame(linuxDbg, rsp, rbp, cfa, rip, dwarf); } - public static LinuxAMD64CFrame getTopFrame(LinuxDebugger dbg, ThreadContext context) { - return getFrameFromReg(dbg, context::getRegisterAsAddress); + public static LinuxAMD64CFrame getTopFrame(LinuxDebugger linuxDbg, ThreadContext context) { + return getFrameFromReg(linuxDbg, context::getRegisterAsAddress); } - private LinuxAMD64CFrame(LinuxDebugger dbg, Address rsp, Address rbp, Address cfa, Address rip, DwarfParser dwarf) { - this(dbg, rsp, rbp, cfa, rip, dwarf, false); + private LinuxAMD64CFrame(LinuxDebugger linuxDbg, Address rsp, Address rbp, Address cfa, Address rip, DwarfParser dwarf) { + this(linuxDbg, rsp, rbp, cfa, rip, dwarf, false); } - private LinuxAMD64CFrame(LinuxDebugger dbg, Address rsp, Address rbp, Address cfa, Address rip, DwarfParser dwarf, boolean use1ByteBeforeToLookup) { - super(dbg.getCDebugger()); - this.rsp = rsp; - this.rbp = rbp; - this.cfa = cfa; - this.rip = rip; - this.dbg = dbg; - this.dwarf = dwarf; - this.use1ByteBeforeToLookup = use1ByteBeforeToLookup; + private LinuxAMD64CFrame(LinuxDebugger linuxDbg, Address rsp, Address rbp, Address cfa, Address rip, DwarfParser dwarf, boolean use1ByteBeforeToLookup) { + super(linuxDbg, rsp, rbp, cfa, rip, dwarf, use1ByteBeforeToLookup); } - // override base class impl to avoid ELF parsing - public ClosestSymbol closestSymbolToPC() { - Address symAddr = use1ByteBeforeToLookup ? pc().addOffsetTo(-1) : pc(); - var sym = dbg.lookup(dbg.getAddressValue(symAddr)); - - // Returns a special symbol if the address is signal handler, - // otherwise returns closest symbol generated by LinuxDebugger. - return dbg.isSignalTrampoline(symAddr) - ? new ClosestSymbol(sym.getName() + " ", 0) - : sym; - } - - public Address pc() { - return rip; - } - - public Address localVariableBase() { - return (dwarf != null && dwarf.isBPOffsetAvailable()) - ? 
cfa.addOffsetTo(dwarf.getBasePointerOffsetFromCFA()) - : rbp; - } - - private Address getNextPC() { - try { - return dwarf == null - ? rbp.getAddressAt(ADDRESS_SIZE) // Java frame - : cfa.getAddressAt(dwarf.getReturnAddressOffsetFromCFA()); // Native frame - } catch (UnmappedAddressException | UnalignedAddressException e) { - return null; - } - } - - private boolean isValidFrame(Address nextCFA, Address nextRBP) { - // Both CFA and RBP must not be null. - if (nextCFA == null && nextRBP == null) { - return false; - } - - // RBP must not be null if CFA is null - it happens between Java frame and Native frame. - // We cannot validate RBP value because it might be used as GPR. Thus returns true - // if RBP is not null. - if (nextCFA == null && nextRBP != null) { - return true; - } - - // nextCFA must be greater than current CFA. - if (nextCFA != null && nextCFA.greaterThanOrEqual(cfa)) { - return true; - } - - // Otherwise, the frame is not valid. - return false; - } - - private Address getNextRSP() { - return dwarf == null ? rbp.addOffsetTo(2 * ADDRESS_SIZE) // Java frame - skip saved BP and RA - : cfa.addOffsetTo(dwarf.getReturnAddressOffsetFromCFA()) - .addOffsetTo(ADDRESS_SIZE); // Native frame - } - - private Address getNextRBP(Address senderFP) { - if (senderFP != null) { - return senderFP; - } else if (dwarf == null) { // Current frame is Java - return rbp.getAddressAt(0); - } else { // Current frame is Native - return dwarf.isBPOffsetAvailable() - ? 
cfa.getAddressAt(dwarf.getBasePointerOffsetFromCFA()) - : rbp; - } - } - - private Address getNextCFA(DwarfParser nextDwarf, Address senderFP, Address senderPC) { - if (nextDwarf == null) { // Next frame is Java + private Address getSenderCFA(DwarfParser senderDwarf, Address senderSP, Address senderFP) { + if (senderDwarf == null) { // Sender frame is Java // CFA is not available on Java frame return null; } - // Next frame is Native - int nextCFAReg = nextDwarf.getCFARegister(); - return switch(nextCFAReg){ - case AMD64ThreadContext.RBP -> getNextRBP(senderFP).addOffsetTo(nextDwarf.getCFAOffset()); - case AMD64ThreadContext.RSP -> getNextRSP().addOffsetTo(nextDwarf.getCFAOffset()); - default -> throw new DebuggerException("Unsupported CFA register: " + nextCFAReg); + // Sender frame is Native + int senderCFAReg = senderDwarf.getCFARegister(); + return switch(senderCFAReg){ + case AMD64ThreadContext.RBP -> senderFP.addOffsetTo(senderDwarf.getCFAOffset()); + case AMD64ThreadContext.RSP -> senderSP.addOffsetTo(senderDwarf.getCFAOffset()); + default -> throw new DebuggerException("Unsupported CFA register: " + senderCFAReg); }; } @Override - public CFrame sender(ThreadProxy th) { - return sender(th, null, null, null); - } - - @Override - public CFrame sender(ThreadProxy th, Address sp, Address fp, Address pc) { - if (dbg.isSignalTrampoline(pc())) { + public CFrame sender(ThreadProxy th, Address senderSP, Address senderFP, Address senderPC) { + if (linuxDbg().isSignalTrampoline(pc())) { // RSP points signal context // https://github.com/torvalds/linux/blob/v6.17/arch/x86/kernel/signal.c#L94 - return getFrameFromReg(dbg, r -> LinuxAMD64ThreadContext.getRegFromSignalTrampoline(this.rsp, r.intValue())); + return getFrameFromReg(linuxDbg(), r -> LinuxAMD64ThreadContext.getRegFromSignalTrampoline(sp(), r.intValue())); } - ThreadContext context = th.getContext(); - - Address nextRSP = sp != null ? 
sp : getNextRSP(); - if (nextRSP == null) { + senderSP = getSenderSP(senderSP); + if (senderSP == null) { return null; } - Address nextPC = pc != null ? pc : getNextPC(); - if (nextPC == null) { + senderPC = getSenderPC(senderPC); + if (senderPC == null) { return null; } - DwarfParser nextDwarf = null; + DwarfParser senderDwarf = null; boolean fallback = false; try { - nextDwarf = createDwarfParser(nextPC); + senderDwarf = createDwarfParser(linuxDbg(), senderPC); } catch (DebuggerException _) { - // Try again with RIP-1 in case RIP is just outside function bounds, + // Try again with PC-1 in case PC is just outside function bounds, // due to function ending with a `call` instruction. try { - nextDwarf = createDwarfParser(nextPC.addOffsetTo(-1)); + senderDwarf = createDwarfParser(linuxDbg(), senderPC.addOffsetTo(-1)); fallback = true; } catch (DebuggerException _) { // DWARF processing should succeed when the frame is native @@ -213,56 +114,29 @@ public final class LinuxAMD64CFrame extends BasicCFrame { } } - Address nextRBP = getNextRBP(fp); + senderFP = getSenderFP(senderFP); try { - Address nextCFA = getNextCFA(nextDwarf, fp, nextPC); - return isValidFrame(nextCFA, nextRBP) - ? new LinuxAMD64CFrame(dbg, nextRSP, nextRBP, nextCFA, nextPC, nextDwarf, fallback) + Address senderCFA = getSenderCFA(senderDwarf, senderSP, senderFP); + return isValidFrame(senderCFA, senderFP) + ? new LinuxAMD64CFrame(linuxDbg(), senderSP, senderFP, senderCFA, senderPC, senderDwarf, fallback) : null; } catch (DebuggerException e) { - if (dbg.isSignalTrampoline(nextPC)) { - // We can through the caller frame if it is signal trampoline. - // getNextCFA() might fail because DwarfParser cannot find out CFA register. - return new LinuxAMD64CFrame(dbg, nextRSP, nextRBP, null, nextPC, nextDwarf, fallback); + if (linuxDbg().isSignalTrampoline(senderPC)) { + // We can use the caller frame if it is a signal trampoline. + // getSenderCFA() might fail because DwarfParser cannot find out CFA register. 
+ return new LinuxAMD64CFrame(linuxDbg(), senderSP, senderFP, null, senderPC, senderDwarf, fallback); } - // Rethrow the original exception if getNextCFA() failed + // Rethrow the original exception if getSenderCFA() failed // and the caller is not signal trampoline. throw e; } } - private DwarfParser createDwarfParser(Address pc) throws DebuggerException { - DwarfParser nextDwarf = null; - Address libptr = dbg.findLibPtrByAddress(pc); - if (libptr != null) { - try { - nextDwarf = new DwarfParser(libptr); - } catch (DebuggerException _) { - // Bail out to Java frame - } - } - - if (nextDwarf != null) { - nextDwarf.processDwarf(pc); - } - - return nextDwarf; - } - @Override public Frame toFrame() { - return new AMD64Frame(rsp, localVariableBase(), rip); + return new AMD64Frame(sp(), localVariableBase(), pc()); } - // package/class internals only - private static final int ADDRESS_SIZE = 8; - private Address rsp; - private Address rbp; - private Address rip; - private Address cfa; - private LinuxDebugger dbg; - private DwarfParser dwarf; - private boolean use1ByteBeforeToLookup; } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java index c4eeaf4a367..61067e63707 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,10 +83,8 @@ public class Array extends Oop { } if (VM.getVM().isCompactObjectHeadersEnabled()) { lengthOffsetInBytes = Oop.getHeaderSize(); - } else if (VM.getVM().isCompressedKlassPointersEnabled()) { - lengthOffsetInBytes = typeSize - VM.getVM().getIntSize(); } else { - lengthOffsetInBytes = typeSize; + lengthOffsetInBytes = typeSize - VM.getVM().getIntSize(); } return lengthOffsetInBytes; } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java index 66efbe3484a..fea4fdaabc2 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,10 +57,8 @@ public class Instance extends Oop { public static long getHeaderSize() { if (VM.getVM().isCompactObjectHeadersEnabled()) { return Oop.getHeaderSize(); - } else if (VM.getVM().isCompressedKlassPointersEnabled()) { - return typeSize - VM.getVM().getIntSize(); } else { - return typeSize; + return typeSize - VM.getVM().getIntSize(); } } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java index 75ad4ab1d66..951499974fa 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,8 +51,7 @@ public class Oop { headerSize = markType.getSize(); } else { headerSize = type.getSize(); - klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); - compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); + compressedKlass = new NarrowKlassField(type.getAddressField("_compressed_klass"), 0); } } @@ -75,7 +74,6 @@ public class Oop { public static long getHeaderSize() { return headerSize; } // Header size in bytes. private static CIntField mark; - private static MetadataField klass; private static NarrowKlassField compressedKlass; // Accessors for declared fields @@ -83,12 +81,9 @@ public class Oop { public Klass getKlass() { if (VM.getVM().isCompactObjectHeadersEnabled()) { - assert(VM.getVM().isCompressedKlassPointersEnabled()); return getMark().getKlass(); - } else if (VM.getVM().isCompressedKlassPointersEnabled()) { - return (Klass)compressedKlass.getValue(getHandle()); } else { - return (Klass)klass.getValue(getHandle()); + return (Klass)compressedKlass.getValue(getHandle()); } } @@ -157,11 +152,7 @@ public class Oop { if (doVMFields) { visitor.doCInt(mark, true); if (!VM.getVM().isCompactObjectHeadersEnabled()) { - if (VM.getVM().isCompressedKlassPointersEnabled()) { - visitor.doMetadata(compressedKlass, true); - } else { - visitor.doMetadata(klass, true); - } + visitor.doMetadata(compressedKlass, true); } } } @@ -220,10 +211,8 @@ public class Oop { if (VM.getVM().isCompactObjectHeadersEnabled()) { Mark mark = new Mark(handle); return mark.getKlass(); - } else if (VM.getVM().isCompressedKlassPointersEnabled()) { - return (Klass)Metadata.instantiateWrapperFor(handle.getCompKlassAddressAt(compressedKlass.getOffset())); } else { - return (Klass)Metadata.instantiateWrapperFor(handle.getAddressAt(klass.getOffset())); + return 
(Klass)Metadata.instantiateWrapperFor(handle.getCompKlassAddressAt(compressedKlass.getOffset())); } } }; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Continuation.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Continuation.java new file mode 100644 index 00000000000..72ba053f451 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Continuation.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +package sun.jvm.hotspot.runtime; + +import sun.jvm.hotspot.debugger.Address; + + +public class Continuation { + + public static boolean isReturnBarrierEntry(Address senderPC) { + if (!Continuations.enabled()) { + return false; + } + return VM.getVM().getStubRoutines().contReturnBarrier().equals(senderPC); + } + + public static boolean isSPInContinuation(ContinuationEntry entry, Address sp) { + return entry.getEntrySP().greaterThan(sp); + } + + public static ContinuationEntry getContinuationEntryForSP(JavaThread thread, Address sp) { + ContinuationEntry entry = thread.getContEntry(); + while (entry != null && !isSPInContinuation(entry, sp)) { + entry = entry.getParent(); + } + return entry; + } + + public static Frame continuationBottomSender(JavaThread thread, Frame callee, Address senderSP) { + ContinuationEntry ce = getContinuationEntryForSP(thread, callee.getSP()); + Frame entry = ce.toFrame(); + if (callee.isInterpretedFrame()) { + entry.setSP(senderSP); // sp != unextended_sp + } + return entry; + } + +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ContinuationEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ContinuationEntry.java index 73152bdee84..7d8a2ba5993 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ContinuationEntry.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ContinuationEntry.java @@ -1,6 +1,6 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2025, NTT DATA. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, NTT DATA. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,12 +26,17 @@ package sun.jvm.hotspot.runtime; import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.runtime.aarch64.*; +import sun.jvm.hotspot.runtime.amd64.*; +import sun.jvm.hotspot.runtime.ppc64.*; +import sun.jvm.hotspot.runtime.riscv64.*; import sun.jvm.hotspot.types.*; +import sun.jvm.hotspot.utilities.*; -public class ContinuationEntry extends VMObject { +public abstract class ContinuationEntry extends VMObject { private static long size; + private static AddressField parentField; private static Address returnPC; static { @@ -41,13 +46,28 @@ public class ContinuationEntry extends VMObject { private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { Type type = db.lookupType("ContinuationEntry"); size = type.getSize(); + parentField = type.getAddressField("_parent"); returnPC = type.getAddressField("_return_pc").getValue(); } + public static ContinuationEntry create(Address addr) { + return switch (VM.getVM().getDebugger().getCPU()) { + case "amd64" -> VMObjectFactory.newObject(AMD64ContinuationEntry.class, addr); + case "aarch64" -> VMObjectFactory.newObject(AARCH64ContinuationEntry.class, addr); + case "riscv64" -> VMObjectFactory.newObject(RISCV64ContinuationEntry.class, addr); + case "ppc64" -> VMObjectFactory.newObject(PPC64ContinuationEntry.class, addr); + default -> throw new UnsupportedPlatformException("Continuation is not yet implemented."); + }; + } + public ContinuationEntry(Address addr) { super(addr); } + public ContinuationEntry getParent() { + return create(parentField.getValue(addr)); + } + public Address getEntryPC() { return returnPC; } @@ -60,4 +80,6 @@ public class ContinuationEntry extends VMObject { return this.getAddress().addOffsetTo(size); } + public abstract Frame toFrame(); + } diff --git a/src/hotspot/share/cds/aotGrowableArray.inline.hpp 
b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Continuations.java similarity index 72% rename from src/hotspot/share/cds/aotGrowableArray.inline.hpp rename to src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Continuations.java index 8c6e8cb6503..884f8764ba5 100644 --- a/src/hotspot/share/cds/aotGrowableArray.inline.hpp +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Continuations.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,17 +22,12 @@ * questions. * */ +package sun.jvm.hotspot.runtime; -#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP -#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP +public class Continuations { -#include "cds/aotGrowableArray.hpp" + public static boolean enabled() { + return VM.getVM().getCommandLineFlag("VMContinuations").getBool(); + } -#include "memory/metaspaceClosure.hpp" - -template -void AOTGrowableArray::metaspace_pointers_do(MetaspaceClosure* it) { - it->push_c_array(AOTGrowableArray::data_addr(), AOTGrowableArray::capacity()); } - -#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Frame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Frame.java index ee9e0ecdafd..978fb39ad1c 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Frame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Frame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -138,6 +138,7 @@ public abstract class Frame implements Cloneable { } public abstract Address getSP(); + public abstract void setSP(Address newSP); public abstract Address getID(); public abstract Address getFP(); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java index 826b5cecfd5..c18bcf8cd37 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -343,7 +343,7 @@ public class JavaThread extends Thread { } public ContinuationEntry getContEntry() { - return VMObjectFactory.newObject(ContinuationEntry.class, contEntryField.getValue(addr)); + return ContinuationEntry.create(contEntryField.getValue(addr)); } /** Gets the Java-side thread object for this JavaThread */ diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/StubRoutines.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/StubRoutines.java index 38a3103ac50..85d8c8cd3b6 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/StubRoutines.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/StubRoutines.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ import sun.jvm.hotspot.utilities.Observer; public class StubRoutines { private static AddressField callStubReturnAddressField; + private static AddressField contReturnBarrierField; static { VM.registerVMInitializedObserver(new Observer() { @@ -46,6 +47,7 @@ public class StubRoutines { private static synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("StubRoutines"); callStubReturnAddressField = type.getAddressField("_call_stub_return_address"); + contReturnBarrierField = type.getAddressField("_cont_returnBarrier"); } public StubRoutines() { @@ -59,4 +61,9 @@ public class StubRoutines { return (addr.equals(returnPC)); } } + + public Address contReturnBarrier() { + return contReturnBarrierField.getValue(); + } + } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java index 11f03a6003e..683e4b67935 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java @@ -76,10 +76,9 @@ public class ThreadLocalAllocBuffer extends VMObject { private long endReserve() { long labAlignmentReserve = VM.getVM().getLabAlignmentReserve(); - long reserveForAllocationPrefetch = VM.getVM().getReserveForAllocationPrefetch(); long heapWordSize = VM.getVM().getHeapWordSize(); - return Math.max(labAlignmentReserve, reserveForAllocationPrefetch) * heapWordSize; + return labAlignmentReserve * heapWordSize; } /** Support for iteration over heap -- not sure how this will diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java index dc27a4fc59e..2ec96121934 100644 --- 
a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -123,7 +123,6 @@ public class VM { private int invocationEntryBCI; private ReversePtrs revPtrs; private VMRegImpl vmregImpl; - private int reserveForAllocationPrefetch; private int labAlignmentReserve; // System.getProperties from debuggee VM @@ -145,7 +144,6 @@ public class VM { private static CIntegerType boolType; private Boolean sharingEnabled; private Boolean compressedOopsEnabled; - private Boolean compressedKlassPointersEnabled; private Boolean compactObjectHeadersEnabled; // command line flags supplied to VM - see struct JVMFlag in jvmFlag.hpp @@ -447,8 +445,6 @@ public class VM { boolType = (CIntegerType) db.lookupType("bool"); Type threadLocalAllocBuffer = db.lookupType("ThreadLocalAllocBuffer"); - CIntegerField reserveForAllocationPrefetchField = threadLocalAllocBuffer.getCIntegerField("_reserve_for_allocation_prefetch"); - reserveForAllocationPrefetch = (int)reserveForAllocationPrefetchField.getCInteger(intType); Type collectedHeap = db.lookupType("CollectedHeap"); CIntegerField labAlignmentReserveField = collectedHeap.getCIntegerField("_lab_alignment_reserve"); @@ -518,11 +514,7 @@ public class VM { heapOopSize = (int)getOopSize(); } - if (isCompressedKlassPointersEnabled()) { - klassPtrSize = (int)getIntSize(); - } else { - klassPtrSize = (int)getOopSize(); // same as an oop - } + klassPtrSize = (int)getIntSize(); } /** This could be used by a reflective runtime system */ @@ -915,10 +907,6 @@ public class VM { return vmInternalInfo; } - public int getReserveForAllocationPrefetch() { - return 
reserveForAllocationPrefetch; - } - public int getLabAlignmentReserve() { return labAlignmentReserve; } @@ -945,15 +933,6 @@ public class VM { return compressedOopsEnabled.booleanValue(); } - public boolean isCompressedKlassPointersEnabled() { - if (compressedKlassPointersEnabled == null) { - Flag flag = getCommandLineFlag("UseCompressedClassPointers"); - compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE: - (flag.getBool()? Boolean.TRUE: Boolean.FALSE); - } - return compressedKlassPointersEnabled.booleanValue(); - } - public boolean isCompactObjectHeadersEnabled() { if (compactObjectHeadersEnabled == null) { Flag flag = getCommandLineFlag("UseCompactObjectHeaders"); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64ContinuationEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64ContinuationEntry.java new file mode 100644 index 00000000000..b373167a37c --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64ContinuationEntry.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +package sun.jvm.hotspot.runtime.aarch64; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.ContinuationEntry; +import sun.jvm.hotspot.runtime.Frame; + + +public class AARCH64ContinuationEntry extends ContinuationEntry { + + public AARCH64ContinuationEntry(Address addr) { + super(addr); + } + + @Override + public Frame toFrame() { + return new AARCH64Frame(getEntrySP(), getEntrySP(), getEntryFP(), getEntryPC()); + } + +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java index 7233d508cbc..5e73150c6cf 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2019, Red Hat Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -206,6 +206,11 @@ public class AARCH64Frame extends Frame { public Address getSP() { return raw_sp; } public Address getID() { return raw_sp; } + @Override + public void setSP(Address newSP) { + raw_sp = newSP; + } + // FIXME: not implemented yet public boolean isSignalHandlerFrameDbg() { return false; } public int getSignalNumberDbg() { return 0; } @@ -360,16 +365,6 @@ public class AARCH64Frame extends Frame { map.setLocation(fp, savedFPAddr); } - private Frame senderForContinuationStub(AARCH64RegisterMap map, CodeBlob cb) { - var contEntry = map.getThread().getContEntry(); - - Address senderSP = contEntry.getEntrySP(); - Address senderPC = contEntry.getEntryPC(); - Address senderFP = contEntry.getEntryFP(); - - return new AARCH64Frame(senderSP, senderFP, senderPC); - } - private Frame senderForCompiledFrame(AARCH64RegisterMap map, CodeBlob cb) { if (DEBUG) { System.out.println("senderForCompiledFrame"); @@ -416,6 +411,22 @@ public class AARCH64Frame extends Frame { updateMapWithSavedLink(map, savedFPAddr); } + if (Continuation.isReturnBarrierEntry(senderPC)) { + // We assume WalkContinuation is "WalkContinuation::skip". + // It is same with c'tor arguments of RegisterMap in frame::next_frame(). 
+ // + // HotSpot code in cpu/aarch64/frame_aarch64.inline.hpp: + // + // if (Continuation::is_return_barrier_entry(sender_pc)) { + // if (map->walk_cont()) { // about to walk into an h-stack + // return Continuation::top_frame(*this, map); + // } else { + // return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp); + // } + // } + return Continuation.continuationBottomSender(map.getThread(), this, senderSP); + } + return new AARCH64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64ContinuationEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64ContinuationEntry.java new file mode 100644 index 00000000000..3cbebfce2f4 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64ContinuationEntry.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +package sun.jvm.hotspot.runtime.amd64; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.ContinuationEntry; +import sun.jvm.hotspot.runtime.Frame; + + +public class AMD64ContinuationEntry extends ContinuationEntry { + + public AMD64ContinuationEntry(Address addr) { + super(addr); + } + + @Override + public Frame toFrame() { + return new AMD64Frame(getEntrySP(), getEntrySP(), getEntryFP(), getEntryPC()); + } + +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64Frame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64Frame.java index fa9d50160e1..2b78157e2b2 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64Frame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64Frame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -206,6 +206,11 @@ public class AMD64Frame extends Frame { public Address getSP() { return raw_sp; } public Address getID() { return raw_sp; } + @Override + public void setSP(Address newSP) { + raw_sp = newSP; + } + // FIXME: not implemented yet (should be done for Solaris) public boolean isSignalHandlerFrameDbg() { return false; } public int getSignalNumberDbg() { return 0; } @@ -258,6 +263,23 @@ public class AMD64Frame extends Frame { // update it accordingly map.setIncludeArgumentOops(false); + // HotSpot has following code in frame::sender_raw() at frame_x86.inline.hpp, however + // in_cont() should be false. 
+ // + // if (map->in_cont()) { // already in an h-stack + // return map->stack_chunk()->sender(*this, map); + // } + // + // in_cont() returns true if _chunk() is not null. + // + // frame::next_frame() creates RegisterMap instance with 4 arguments. + // It sets RegisterMap::WalkContinuation::skip to final argument (walk_cont), + // therefore _chunk will not be initialized by the following code in c'tor of RegisterMap. + // + // if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) { + // _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */); + // } + if (isEntryFrame()) return senderForEntryFrame(map); if (isInterpretedFrame()) return senderForInterpreterFrame(map); @@ -360,16 +382,6 @@ public class AMD64Frame extends Frame { map.setLocation(rbp, savedFPAddr); } - private Frame senderForContinuationStub(AMD64RegisterMap map, CodeBlob cb) { - var contEntry = map.getThread().getContEntry(); - - Address senderSP = contEntry.getEntrySP(); - Address senderPC = contEntry.getEntryPC(); - Address senderFP = contEntry.getEntryFP(); - - return new AMD64Frame(senderSP, senderFP, senderPC); - } - private Frame senderForCompiledFrame(AMD64RegisterMap map, CodeBlob cb) { if (DEBUG) { System.out.println("senderForCompiledFrame"); @@ -408,6 +420,22 @@ public class AMD64Frame extends Frame { updateMapWithSavedLink(map, savedFPAddr); } + if (Continuation.isReturnBarrierEntry(senderPC)) { + // We assume WalkContinuation is "WalkContinuation::skip". + // It is same with c'tor arguments of RegisterMap in frame::next_frame(). 
+ // + // HotSpot code in cpu/x86/frame_x86.inline.hpp: + // + // if (Continuation::is_return_barrier_entry(sender_pc)) { + // if (map->walk_cont()) { // about to walk into an h-stack + // return Continuation::top_frame(*this, map); + // } else { + // return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp); + // } + // } + return Continuation.continuationBottomSender(map.getThread(), this, senderSP); + } + return new AMD64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64ContinuationEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64ContinuationEntry.java new file mode 100644 index 00000000000..fac71cc9953 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64ContinuationEntry.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +package sun.jvm.hotspot.runtime.ppc64; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.ContinuationEntry; +import sun.jvm.hotspot.runtime.Frame; + + +public class PPC64ContinuationEntry extends ContinuationEntry { + + public PPC64ContinuationEntry(Address addr) { + super(addr); + } + + @Override + public Frame toFrame() { + return new PPC64Frame(getEntrySP(), getEntrySP(), getEntryFP(), getEntryPC()); + } + +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64Frame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64Frame.java index cae034c9613..a663d016011 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64Frame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ppc64/PPC64Frame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -198,6 +198,11 @@ public class PPC64Frame extends Frame { public Address getSP() { return raw_sp; } public Address getID() { return raw_sp; } + @Override + public void setSP(Address newSP) { + raw_sp = newSP; + } + // FIXME: not implemented yet (should be done for Solaris/PPC64) public boolean isSignalHandlerFrameDbg() { return false; } public int getSignalNumberDbg() { return 0; } @@ -260,9 +265,7 @@ public class PPC64Frame extends Frame { if (cb != null) { if (cb.isUpcallStub()) { return senderForUpcallStub(map, (UpcallStub)cb); - } else if (cb.isContinuationStub()) { - return senderForContinuationStub(map, cb); - } else { + } else if (cb.getFrameSize() > 0) { return senderForCompiledFrame(map, cb); } } @@ -337,16 +340,6 @@ public class PPC64Frame extends Frame { return new PPC64Frame(sp, unextendedSP, getLink(), getSenderPC()); } - private Frame senderForContinuationStub(PPC64RegisterMap map, CodeBlob cb) { - var contEntry = map.getThread().getContEntry(); - - Address sp = contEntry.getEntrySP(); - Address pc = contEntry.getEntryPC(); - Address fp = contEntry.getEntryFP(); - - return new PPC64Frame(sp, fp, pc); - } - private Frame senderForCompiledFrame(PPC64RegisterMap map, CodeBlob cb) { if (DEBUG) { System.out.println("senderForCompiledFrame"); @@ -379,6 +372,22 @@ public class PPC64Frame extends Frame { } } + if (Continuation.isReturnBarrierEntry(senderPC)) { + // We assume WalkContinuation is "WalkContinuation::skip". + // It is same with c'tor arguments of RegisterMap in frame::next_frame(). 
+ // + // HotSpot code in cpu/ppc/frame_ppc.inline.hpp: + // + // if (Continuation::is_return_barrier_entry(sender_pc)) { + // if (map->walk_cont()) { // about to walk into an h-stack + // return Continuation::top_frame(*this, map); + // } else { + // return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp); + // } + // } + return Continuation.continuationBottomSender(map.getThread(), this, senderSP); + } + return new PPC64Frame(senderSP, getLink(), senderPC); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64ContinuationEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64ContinuationEntry.java new file mode 100644 index 00000000000..ec04498a6c0 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64ContinuationEntry.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2026, NTT DATA. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +package sun.jvm.hotspot.runtime.riscv64; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.ContinuationEntry; +import sun.jvm.hotspot.runtime.Frame; + + +public class RISCV64ContinuationEntry extends ContinuationEntry { + + public RISCV64ContinuationEntry(Address addr) { + super(addr); + } + + @Override + public Frame toFrame() { + return new RISCV64Frame(getEntrySP(), getEntrySP(), getEntryFP(), getEntryPC()); + } + +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64Frame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64Frame.java index 44c8f4c679c..a35c0735979 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64Frame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64Frame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2019, Red Hat Inc. * Copyright (c) 2021, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -201,6 +201,11 @@ public class RISCV64Frame extends Frame { public Address getSP() { return raw_sp; } public Address getID() { return raw_sp; } + @Override + public void setSP(Address newSP) { + raw_sp = newSP; + } + // FIXME: not implemented yet public boolean isSignalHandlerFrameDbg() { return false; } public int getSignalNumberDbg() { return 0; } @@ -264,9 +269,7 @@ public class RISCV64Frame extends Frame { if (cb != null) { if (cb.isUpcallStub()) { return senderForUpcallStub(map, (UpcallStub)cb); - } else if (cb.isContinuationStub()) { - return senderForContinuationStub(map, cb); - } else { + } else if (cb.getFrameSize() > 0) { return senderForCompiledFrame(map, cb); } } @@ -354,16 +357,6 @@ public class RISCV64Frame extends Frame { map.setLocation(fp, savedFPAddr); } - private Frame senderForContinuationStub(RISCV64RegisterMap map, CodeBlob cb) { - var contEntry = map.getThread().getContEntry(); - - Address senderSP = contEntry.getEntrySP(); - Address senderPC = contEntry.getEntryPC(); - Address senderFP = contEntry.getEntryFP(); - - return new RISCV64Frame(senderSP, senderFP, senderPC); - } - private Frame senderForCompiledFrame(RISCV64RegisterMap map, CodeBlob cb) { if (DEBUG) { System.out.println("senderForCompiledFrame"); @@ -406,6 +399,22 @@ public class RISCV64Frame extends Frame { updateMapWithSavedLink(map, savedFPAddr); } + if (Continuation.isReturnBarrierEntry(senderPC)) { + // We assume WalkContinuation is "WalkContinuation::skip". + // It is same with c'tor arguments of RegisterMap in frame::next_frame(). 
+ // + // HotSpot code in cpu/riscv/frame_riscv.inline.hpp: + // + // if (Continuation::is_return_barrier_entry(sender_pc)) { + // if (map->walk_cont()) { // about to walk into an h-stack + // return Continuation::top_frame(*this, map); + // } else { + // return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp); + // } + // } + return Continuation.continuationBottomSender(map.getThread(), this, senderSP); + } + return new RISCV64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC); } diff --git a/src/jdk.httpserver/share/classes/sun/net/httpserver/ContextList.java b/src/jdk.httpserver/share/classes/sun/net/httpserver/ContextList.java index 6393ca34798..14a07b3a677 100644 --- a/src/jdk.httpserver/share/classes/sun/net/httpserver/ContextList.java +++ b/src/jdk.httpserver/share/classes/sun/net/httpserver/ContextList.java @@ -186,38 +186,31 @@ class ContextList { */ PATH_PREFIX((contextPath, requestPath) -> { - // Fast-path for `/` - if ("/".equals(contextPath)) { + // Does the request path prefix match? + if (!requestPath.startsWith(contextPath)) { + return false; + } + + // Is it an exact match? + int contextPathLength = contextPath.length(); + if (requestPath.length() == contextPathLength) { return true; } - // Does the request path prefix match? - if (requestPath.startsWith(contextPath)) { - - // Is it an exact match? - int contextPathLength = contextPath.length(); - if (requestPath.length() == contextPathLength) { - return true; - } - - // Is it a path-prefix match? - assert contextPathLength > 0; - return - // Case 1: The request path starts with the context - // path, but the context path has an extra path - // separator suffix. For instance, the context path is - // `/foo/` and the request path is `/foo/bar`. - contextPath.charAt(contextPathLength - 1) == '/' || - // Case 2: The request path starts with the - // context path, but the request path has an - // extra path separator suffix. 
For instance, - // context path is `/foo` and the request path - // is `/foo/` or `/foo/bar`. - requestPath.charAt(contextPathLength) == '/'; - - } - - return false; + // Is it a path-prefix match? + assert contextPathLength > 0; + return + // Case 1: The request path starts with the context + // path, but the context path has an extra path + // separator suffix. For instance, the context path is + // `/foo/` and the request path is `/foo/bar`. + contextPath.charAt(contextPathLength - 1) == '/' || + // Case 2: The request path starts with the + // context path, but the request path has an + // extra path separator suffix. For instance, + // context path is `/foo` and the request path + // is `/foo/` or `/foo/bar`. + requestPath.charAt(contextPathLength) == '/'; }); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractMask.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractMask.java index 5b762edfd3b..9ac90c08c27 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractMask.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractMask.java @@ -24,17 +24,19 @@ */ package jdk.incubator.vector; -import java.util.Objects; - -import jdk.internal.vm.annotation.ForceInline; - import jdk.internal.misc.Unsafe; - +import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; import static jdk.incubator.vector.VectorOperators.*; -abstract class AbstractMask extends VectorMask { +abstract sealed class AbstractMask extends VectorMask + permits ByteVector64.ByteMask64, ByteVector128.ByteMask128, ByteVector256.ByteMask256, ByteVector512.ByteMask512, ByteVectorMax.ByteMaskMax, + DoubleVector64.DoubleMask64, DoubleVector128.DoubleMask128, DoubleVector256.DoubleMask256, DoubleVector512.DoubleMask512, DoubleVectorMax.DoubleMaskMax, + FloatVector64.FloatMask64, FloatVector128.FloatMask128, FloatVector256.FloatMask256, FloatVector512.FloatMask512, 
FloatVectorMax.FloatMaskMax, + IntVector64.IntMask64, IntVector128.IntMask128, IntVector256.IntMask256, IntVector512.IntMask512, IntVectorMax.IntMaskMax, + LongVector64.LongMask64, LongVector128.LongMask128, LongVector256.LongMask256, LongVector512.LongMask512, LongVectorMax.LongMaskMax, + ShortVector64.ShortMask64, ShortVector128.ShortMask128, ShortVector256.ShortMask256, ShortVector512.ShortMask512, ShortVectorMax.ShortMaskMax { AbstractMask(boolean[] bits) { super(bits); } diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractShuffle.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractShuffle.java index 075400a0d4a..bea495f74fc 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractShuffle.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractShuffle.java @@ -25,10 +25,17 @@ package jdk.incubator.vector; import java.util.function.IntUnaryOperator; + import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -abstract class AbstractShuffle extends VectorShuffle { +abstract sealed class AbstractShuffle extends VectorShuffle + permits ByteVector64.ByteShuffle64, ByteVector128.ByteShuffle128, ByteVector256.ByteShuffle256, ByteVector512.ByteShuffle512, ByteVectorMax.ByteShuffleMax, + DoubleVector64.DoubleShuffle64, DoubleVector128.DoubleShuffle128, DoubleVector256.DoubleShuffle256, DoubleVector512.DoubleShuffle512, DoubleVectorMax.DoubleShuffleMax, + FloatVector64.FloatShuffle64, FloatVector128.FloatShuffle128, FloatVector256.FloatShuffle256, FloatVector512.FloatShuffle512, FloatVectorMax.FloatShuffleMax, + IntVector64.IntShuffle64, IntVector128.IntShuffle128, IntVector256.IntShuffle256, IntVector512.IntShuffle512, IntVectorMax.IntShuffleMax, + LongVector64.LongShuffle64, LongVector128.LongShuffle128, LongVector256.LongShuffle256, LongVector512.LongShuffle512, LongVectorMax.LongShuffleMax, + ShortVector64.ShortShuffle64, 
ShortVector128.ShortShuffle128, ShortVector256.ShortShuffle256, ShortVector512.ShortShuffle512, ShortVectorMax.ShortShuffleMax { static final IntUnaryOperator IDENTITY = i -> i; // Internal representation allows for a maximum index of E.MAX_VALUE - 1 diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractSpecies.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractSpecies.java index 6c834077387..3fd2be34346 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractSpecies.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractSpecies.java @@ -24,39 +24,31 @@ */ package jdk.incubator.vector; -import java.lang.foreign.MemorySegment; -import jdk.internal.vm.annotation.ForceInline; -import jdk.internal.vm.annotation.Stable; import java.lang.reflect.Array; -import java.nio.ByteOrder; import java.util.Arrays; import java.util.function.Function; import java.util.function.IntUnaryOperator; -abstract class AbstractSpecies extends jdk.internal.vm.vector.VectorSupport.VectorSpecies - implements VectorSpecies { - @Stable +import jdk.internal.vm.annotation.ForceInline; +import jdk.internal.vm.annotation.Stable; +import jdk.internal.vm.annotation.TrustFinalFields; + +@TrustFinalFields +abstract sealed class AbstractSpecies extends jdk.internal.vm.vector.VectorSupport.VectorSpecies + implements VectorSpecies + permits ByteVector.ByteSpecies, DoubleVector.DoubleSpecies, FloatVector.FloatSpecies, + IntVector.IntSpecies, LongVector.LongSpecies, ShortVector.ShortSpecies { final VectorShape vectorShape; - @Stable final LaneType laneType; - @Stable final int laneCount; - @Stable final int laneCountLog2P1; - @Stable final Class> vectorType; - @Stable final Class> maskType; - @Stable final Class> shuffleType; - @Stable final Function> vectorFactory; - @Stable final VectorShape indexShape; - @Stable final int maxScale, minScale; - @Stable final int vectorBitSize, vectorByteSize; 
AbstractSpecies(VectorShape vectorShape, diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractVector.java index 80260c2bd30..ea8112cc2ae 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/AbstractVector.java @@ -25,22 +25,17 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; +import java.nio.ByteOrder; +import java.util.function.IntUnaryOperator; -import jdk.internal.foreign.AbstractMemorySegmentImpl; -import jdk.internal.foreign.Utils; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import java.lang.foreign.ValueLayout; -import java.lang.reflect.Array; -import java.nio.ByteOrder; -import java.util.Objects; -import java.util.function.IntUnaryOperator; - import static jdk.incubator.vector.VectorOperators.*; @SuppressWarnings("cast") -abstract class AbstractVector extends Vector { +abstract sealed class AbstractVector extends Vector + permits ByteVector, DoubleVector, FloatVector, IntVector, LongVector, ShortVector { /** * The order of vector bytes when stored in natural, * array elements of the same lane type. diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java index 846032cb5c6..7231ada3273 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java @@ -49,7 +49,8 @@ import static jdk.incubator.vector.VectorOperators.*; * {@code byte} values. 
*/ @SuppressWarnings("cast") // warning: redundant cast -public abstract class ByteVector extends AbstractVector { +public abstract sealed class ByteVector extends AbstractVector + permits ByteVector64, ByteVector128, ByteVector256, ByteVector512, ByteVectorMax { ByteVector(byte[] vec) { super(vec); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector128.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector128.java index c38e8d0f8a0..36ea8d081a8 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector128.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector128.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ByteVector128 extends ByteVector { static final ByteSpecies VSPECIES = (ByteSpecies) ByteVector.SPECIES_128; @@ -371,7 +371,7 @@ final class ByteVector128 extends ByteVector { @Override @ForceInline public final ByteShuffle128 toShuffle() { - return (ByteShuffle128) toShuffle(vspecies(), false); + return (ByteShuffle128) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -598,7 +598,7 @@ final class ByteVector128 extends ByteVector { } // Mask - + @ValueBased static final class ByteMask128 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -646,7 +646,7 @@ final class ByteVector128 extends ByteVector { @Override ByteMask128 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -656,7 +656,7 @@ final class ByteVector128 extends ByteVector { @Override ByteMask128 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ByteMask128)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -806,16 +806,16 @@ final class ByteVector128 extends ByteVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ByteMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ByteMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ByteMask128)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ByteMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ByteMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> 
allTrueHelper(((ByteMask128)m).getBits())); } @ForceInline @@ -823,7 +823,7 @@ final class ByteVector128 extends ByteVector { static ByteMask128 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ByteMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ByteMask128 TRUE_MASK = new ByteMask128(true); private static final ByteMask128 FALSE_MASK = new ByteMask128(false); @@ -831,7 +831,7 @@ final class ByteVector128 extends ByteVector { } // Shuffle - + @ValueBased static final class ByteShuffle128 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -883,7 +883,7 @@ final class ByteVector128 extends ByteVector { @Override ByteVector128 toBitsVector0() { - return ((ByteVector128) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ByteVector128) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -934,7 +934,7 @@ final class ByteVector128 extends ByteVector { @ForceInline public final ByteMask128 laneIsValid() { return (ByteMask128) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -942,7 +942,7 @@ final class ByteVector128 extends ByteVector { public final ByteShuffle128 rearrange(VectorShuffle shuffle) { ByteShuffle128 concreteShuffle = (ByteShuffle128) shuffle; return (ByteShuffle128) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -955,7 +955,7 @@ final class ByteVector128 extends ByteVector { v = (ByteVector128) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ByteShuffle128) v.toShuffle(vspecies(), false); + return (ByteShuffle128) v.toShuffle(VSPECIES, false); } private static byte[] prepare(int[] indices, int offset) { diff --git 
a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector256.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector256.java index 0eec0c56e37..a11268ea40d 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector256.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector256.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ByteVector256 extends ByteVector { static final ByteSpecies VSPECIES = (ByteSpecies) ByteVector.SPECIES_256; @@ -371,7 +371,7 @@ final class ByteVector256 extends ByteVector { @Override @ForceInline public final ByteShuffle256 toShuffle() { - return (ByteShuffle256) toShuffle(vspecies(), false); + return (ByteShuffle256) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -630,7 +630,7 @@ final class ByteVector256 extends ByteVector { } // Mask - + @ValueBased static final class ByteMask256 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -678,7 +678,7 @@ final class ByteVector256 extends ByteVector { @Override ByteMask256 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -688,7 +688,7 @@ final class 
ByteVector256 extends ByteVector { @Override ByteMask256 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ByteMask256)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -838,16 +838,16 @@ final class ByteVector256 extends ByteVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ByteMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ByteMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ByteMask256)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ByteMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ByteMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((ByteMask256)m).getBits())); } @ForceInline @@ -855,7 +855,7 @@ final class ByteVector256 extends ByteVector { static ByteMask256 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ByteMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final ByteMask256 TRUE_MASK = new ByteMask256(true); private static final ByteMask256 FALSE_MASK = new ByteMask256(false); @@ -863,7 +863,7 @@ final class ByteVector256 extends ByteVector { } // Shuffle - + @ValueBased static final class ByteShuffle256 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -915,7 +915,7 @@ final class ByteVector256 extends ByteVector { @Override ByteVector256 toBitsVector0() { - return ((ByteVector256) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ByteVector256) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -966,7 +966,7 @@ final class ByteVector256 extends ByteVector { @ForceInline public final ByteMask256 laneIsValid() { return (ByteMask256) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -974,7 +974,7 @@ final class ByteVector256 extends ByteVector { public final ByteShuffle256 rearrange(VectorShuffle shuffle) { ByteShuffle256 concreteShuffle = (ByteShuffle256) shuffle; return (ByteShuffle256) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -987,7 +987,7 @@ final class ByteVector256 extends ByteVector { v = (ByteVector256) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ByteShuffle256) v.toShuffle(vspecies(), false); + return (ByteShuffle256) v.toShuffle(VSPECIES, false); } private static byte[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector512.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector512.java index 138319b60d4..707254e034e 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector512.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector512.java 
@@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ByteVector512 extends ByteVector { static final ByteSpecies VSPECIES = (ByteSpecies) ByteVector.SPECIES_512; @@ -371,7 +371,7 @@ final class ByteVector512 extends ByteVector { @Override @ForceInline public final ByteShuffle512 toShuffle() { - return (ByteShuffle512) toShuffle(vspecies(), false); + return (ByteShuffle512) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -694,7 +694,7 @@ final class ByteVector512 extends ByteVector { } // Mask - + @ValueBased static final class ByteMask512 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -742,7 +742,7 @@ final class ByteVector512 extends ByteVector { @Override ByteMask512 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -752,7 +752,7 @@ final class ByteVector512 extends ByteVector { @Override ByteMask512 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ByteMask512)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -902,16 +902,16 @@ final class ByteVector512 extends ByteVector { 
@ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ByteMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ByteMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ByteMask512)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ByteMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ByteMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((ByteMask512)m).getBits())); } @ForceInline @@ -919,7 +919,7 @@ final class ByteVector512 extends ByteVector { static ByteMask512 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ByteMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ByteMask512 TRUE_MASK = new ByteMask512(true); private static final ByteMask512 FALSE_MASK = new ByteMask512(false); @@ -927,7 +927,7 @@ final class ByteVector512 extends ByteVector { } // Shuffle - + @ValueBased static final class ByteShuffle512 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -979,7 +979,7 @@ final class ByteVector512 extends ByteVector { @Override ByteVector512 toBitsVector0() { - return ((ByteVector512) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ByteVector512) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -1030,7 +1030,7 @@ final class ByteVector512 extends ByteVector { @ForceInline public final ByteMask512 laneIsValid() { return (ByteMask512) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -1038,7 +1038,7 @@ final class ByteVector512 extends ByteVector { public final ByteShuffle512 
rearrange(VectorShuffle shuffle) { ByteShuffle512 concreteShuffle = (ByteShuffle512) shuffle; return (ByteShuffle512) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -1051,7 +1051,7 @@ final class ByteVector512 extends ByteVector { v = (ByteVector512) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ByteShuffle512) v.toShuffle(vspecies(), false); + return (ByteShuffle512) v.toShuffle(VSPECIES, false); } private static byte[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector64.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector64.java index d7c7c78534b..d304edfc0c7 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector64.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector64.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ByteVector64 extends ByteVector { static final ByteSpecies VSPECIES = (ByteSpecies) ByteVector.SPECIES_64; @@ -371,7 +371,7 @@ final class ByteVector64 extends ByteVector { @Override @ForceInline public final ByteShuffle64 toShuffle() { - return (ByteShuffle64) toShuffle(vspecies(), false); + return (ByteShuffle64) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -582,7 +582,7 @@ final class ByteVector64 extends ByteVector { } // Mask - + @ValueBased static final class ByteMask64 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -630,7 +630,7 @@ final class ByteVector64 extends ByteVector { @Override ByteMask64 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -640,7 +640,7 @@ final class ByteVector64 extends ByteVector { @Override ByteMask64 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ByteMask64)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -790,16 +790,16 @@ final class ByteVector64 extends ByteVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ByteMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ByteMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ByteMask64)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ByteMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ByteMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> 
allTrueHelper(((ByteMask64)m).getBits())); } @ForceInline @@ -807,7 +807,7 @@ final class ByteVector64 extends ByteVector { static ByteMask64 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ByteMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ByteMask64 TRUE_MASK = new ByteMask64(true); private static final ByteMask64 FALSE_MASK = new ByteMask64(false); @@ -815,7 +815,7 @@ final class ByteVector64 extends ByteVector { } // Shuffle - + @ValueBased static final class ByteShuffle64 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -867,7 +867,7 @@ final class ByteVector64 extends ByteVector { @Override ByteVector64 toBitsVector0() { - return ((ByteVector64) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ByteVector64) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -918,7 +918,7 @@ final class ByteVector64 extends ByteVector { @ForceInline public final ByteMask64 laneIsValid() { return (ByteMask64) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -926,7 +926,7 @@ final class ByteVector64 extends ByteVector { public final ByteShuffle64 rearrange(VectorShuffle shuffle) { ByteShuffle64 concreteShuffle = (ByteShuffle64) shuffle; return (ByteShuffle64) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -939,7 +939,7 @@ final class ByteVector64 extends ByteVector { v = (ByteVector64) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ByteShuffle64) v.toShuffle(vspecies(), false); + return (ByteShuffle64) v.toShuffle(VSPECIES, false); } private static byte[] prepare(int[] indices, int offset) { diff --git 
a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVectorMax.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVectorMax.java index 636aa83893a..0084995346b 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVectorMax.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVectorMax.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ByteVectorMax extends ByteVector { static final ByteSpecies VSPECIES = (ByteSpecies) ByteVector.SPECIES_MAX; @@ -371,7 +371,7 @@ final class ByteVectorMax extends ByteVector { @Override @ForceInline public final ByteShuffleMax toShuffle() { - return (ByteShuffleMax) toShuffle(vspecies(), false); + return (ByteShuffleMax) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -568,7 +568,7 @@ final class ByteVectorMax extends ByteVector { } // Mask - + @ValueBased static final class ByteMaskMax extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -616,7 +616,7 @@ final class ByteVectorMax extends ByteVector { @Override ByteMaskMax uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -626,7 +626,7 @@ final class 
ByteVectorMax extends ByteVector { @Override ByteMaskMax bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ByteMaskMax)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -776,16 +776,16 @@ final class ByteVectorMax extends ByteVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ByteMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ByteMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ByteMaskMax)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ByteMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ByteMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((ByteMaskMax)m).getBits())); } @ForceInline @@ -793,7 +793,7 @@ final class ByteVectorMax extends ByteVector { static ByteMaskMax maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ByteMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final ByteMaskMax TRUE_MASK = new ByteMaskMax(true); private static final ByteMaskMax FALSE_MASK = new ByteMaskMax(false); @@ -801,7 +801,7 @@ final class ByteVectorMax extends ByteVector { } // Shuffle - + @ValueBased static final class ByteShuffleMax extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -853,7 +853,7 @@ final class ByteVectorMax extends ByteVector { @Override ByteVectorMax toBitsVector0() { - return ((ByteVectorMax) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ByteVectorMax) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -904,7 +904,7 @@ final class ByteVectorMax extends ByteVector { @ForceInline public final ByteMaskMax laneIsValid() { return (ByteMaskMax) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -912,7 +912,7 @@ final class ByteVectorMax extends ByteVector { public final ByteShuffleMax rearrange(VectorShuffle shuffle) { ByteShuffleMax concreteShuffle = (ByteShuffleMax) shuffle; return (ByteShuffleMax) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -925,7 +925,7 @@ final class ByteVectorMax extends ByteVector { v = (ByteVectorMax) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ByteShuffleMax) v.toShuffle(vspecies(), false); + return (ByteShuffleMax) v.toShuffle(VSPECIES, false); } private static byte[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/CPUFeatures.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/CPUFeatures.java index c0d8ef03ada..05b5f6f69f4 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/CPUFeatures.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/CPUFeatures.java @@ -1,5 
+1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ import static jdk.internal.vm.vector.Utils.debug; /** * Enumerates CPU ISA extensions supported by the JVM on the current hardware. */ -/*package-private*/ class CPUFeatures { +/*package-private*/ final class CPUFeatures { private static final Set features = getCPUFeatures(); private static Set getCPUFeatures() { @@ -74,9 +74,6 @@ import static jdk.internal.vm.vector.Utils.debug; debug("AVX=%b; AVX2=%b; AVX512F=%b; AVX512DQ=%b", SUPPORTS_AVX, SUPPORTS_AVX2, SUPPORTS_AVX512F, SUPPORTS_AVX512DQ); - assert SUPPORTS_AVX512F == (VectorShape.getMaxVectorBitSize(int.class) == 512); - assert SUPPORTS_AVX2 == (VectorShape.getMaxVectorBitSize(byte.class) >= 256); - assert SUPPORTS_AVX == (VectorShape.getMaxVectorBitSize(float.class) >= 256); } } diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java index 5e7c97dc56d..6f9b5e53ead 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java @@ -49,7 +49,8 @@ import static jdk.incubator.vector.VectorOperators.*; * {@code double} values. 
*/ @SuppressWarnings("cast") // warning: redundant cast -public abstract class DoubleVector extends AbstractVector { +public abstract sealed class DoubleVector extends AbstractVector + permits DoubleVector64, DoubleVector128, DoubleVector256, DoubleVector512, DoubleVectorMax { DoubleVector(double[] vec) { super(vec); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector128.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector128.java index 1140d377e9b..8d3ec21ec9b 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector128.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector128.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class DoubleVector128 extends DoubleVector { static final DoubleSpecies VSPECIES = (DoubleSpecies) DoubleVector.SPECIES_128; @@ -358,7 +359,7 @@ final class DoubleVector128 extends DoubleVector { @Override @ForceInline public final DoubleShuffle128 toShuffle() { - return (DoubleShuffle128) toShuffle(vspecies(), false); + return (DoubleShuffle128) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -559,7 +560,7 @@ final class DoubleVector128 extends DoubleVector { } // Mask - + @ValueBased static final class DoubleMask128 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -607,7 +608,7 @@ final class DoubleVector128 extends DoubleVector { @Override DoubleMask128 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -617,7 +618,7 @@ final class DoubleVector128 extends DoubleVector { @Override DoubleMask128 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((DoubleMask128)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -767,16 +768,16 @@ final class DoubleVector128 extends DoubleVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, DoubleMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((DoubleMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((DoubleMask128)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, DoubleMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((DoubleMask128)m).getBits())); + this, 
VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((DoubleMask128)m).getBits())); } @ForceInline @@ -784,7 +785,7 @@ final class DoubleVector128 extends DoubleVector { static DoubleMask128 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(DoubleMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final DoubleMask128 TRUE_MASK = new DoubleMask128(true); private static final DoubleMask128 FALSE_MASK = new DoubleMask128(false); @@ -792,7 +793,7 @@ final class DoubleVector128 extends DoubleVector { } // Shuffle - + @ValueBased static final class DoubleShuffle128 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -833,7 +834,7 @@ final class DoubleVector128 extends DoubleVector { @Override @ForceInline public DoubleVector128 toVector() { - return (DoubleVector128) toBitsVector().castShape(vspecies(), 0); + return (DoubleVector128) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -844,7 +845,7 @@ final class DoubleVector128 extends DoubleVector { @Override LongVector128 toBitsVector0() { - return ((LongVector128) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector128) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -918,7 +919,7 @@ final class DoubleVector128 extends DoubleVector { @ForceInline public final DoubleMask128 laneIsValid() { return (DoubleMask128) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -926,7 +927,7 @@ final class DoubleVector128 extends DoubleVector { public final DoubleShuffle128 rearrange(VectorShuffle shuffle) { DoubleShuffle128 concreteShuffle = (DoubleShuffle128) shuffle; return (DoubleShuffle128) toBitsVector().rearrange(concreteShuffle.cast(LongVector.SPECIES_128)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, 
false); } @ForceInline @@ -939,7 +940,7 @@ final class DoubleVector128 extends DoubleVector { v = (LongVector128) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (DoubleShuffle128) v.toShuffle(vspecies(), false); + return (DoubleShuffle128) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector256.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector256.java index 59b7913cfcb..c6bb4b7e3d3 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector256.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector256.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class DoubleVector256 extends DoubleVector { static final DoubleSpecies VSPECIES = (DoubleSpecies) DoubleVector.SPECIES_256; @@ -358,7 +359,7 @@ final class DoubleVector256 extends DoubleVector { @Override @ForceInline public final DoubleShuffle256 toShuffle() { - return (DoubleShuffle256) toShuffle(vspecies(), false); + return (DoubleShuffle256) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -563,7 +564,7 @@ final class DoubleVector256 extends DoubleVector { } // Mask - + @ValueBased static final class DoubleMask256 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -611,7 +612,7 @@ final class DoubleVector256 extends DoubleVector { @Override DoubleMask256 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -621,7 +622,7 @@ final class DoubleVector256 extends DoubleVector { @Override DoubleMask256 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((DoubleMask256)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -771,16 +772,16 @@ final class DoubleVector256 extends DoubleVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, DoubleMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((DoubleMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((DoubleMask256)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, DoubleMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((DoubleMask256)m).getBits())); + this, 
VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((DoubleMask256)m).getBits())); } @ForceInline @@ -788,7 +789,7 @@ final class DoubleVector256 extends DoubleVector { static DoubleMask256 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(DoubleMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final DoubleMask256 TRUE_MASK = new DoubleMask256(true); private static final DoubleMask256 FALSE_MASK = new DoubleMask256(false); @@ -796,7 +797,7 @@ final class DoubleVector256 extends DoubleVector { } // Shuffle - + @ValueBased static final class DoubleShuffle256 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -837,7 +838,7 @@ final class DoubleVector256 extends DoubleVector { @Override @ForceInline public DoubleVector256 toVector() { - return (DoubleVector256) toBitsVector().castShape(vspecies(), 0); + return (DoubleVector256) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -848,7 +849,7 @@ final class DoubleVector256 extends DoubleVector { @Override LongVector256 toBitsVector0() { - return ((LongVector256) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector256) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -922,7 +923,7 @@ final class DoubleVector256 extends DoubleVector { @ForceInline public final DoubleMask256 laneIsValid() { return (DoubleMask256) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -930,7 +931,7 @@ final class DoubleVector256 extends DoubleVector { public final DoubleShuffle256 rearrange(VectorShuffle shuffle) { DoubleShuffle256 concreteShuffle = (DoubleShuffle256) shuffle; return (DoubleShuffle256) toBitsVector().rearrange(concreteShuffle.cast(LongVector.SPECIES_256)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, 
false); } @ForceInline @@ -943,7 +944,7 @@ final class DoubleVector256 extends DoubleVector { v = (LongVector256) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (DoubleShuffle256) v.toShuffle(vspecies(), false); + return (DoubleShuffle256) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector512.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector512.java index 8ed21953394..fb1441efc63 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector512.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector512.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class DoubleVector512 extends DoubleVector { static final DoubleSpecies VSPECIES = (DoubleSpecies) DoubleVector.SPECIES_512; @@ -358,7 +359,7 @@ final class DoubleVector512 extends DoubleVector { @Override @ForceInline public final DoubleShuffle512 toShuffle() { - return (DoubleShuffle512) toShuffle(vspecies(), false); + return (DoubleShuffle512) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -571,7 +572,7 @@ final class DoubleVector512 extends DoubleVector { } // Mask - + @ValueBased static final class DoubleMask512 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -619,7 +620,7 @@ final class DoubleVector512 extends DoubleVector { @Override DoubleMask512 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -629,7 +630,7 @@ final class DoubleVector512 extends DoubleVector { @Override DoubleMask512 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((DoubleMask512)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -779,16 +780,16 @@ final class DoubleVector512 extends DoubleVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, DoubleMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((DoubleMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((DoubleMask512)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, DoubleMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((DoubleMask512)m).getBits())); + this, 
VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((DoubleMask512)m).getBits())); } @ForceInline @@ -796,7 +797,7 @@ final class DoubleVector512 extends DoubleVector { static DoubleMask512 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(DoubleMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final DoubleMask512 TRUE_MASK = new DoubleMask512(true); private static final DoubleMask512 FALSE_MASK = new DoubleMask512(false); @@ -804,7 +805,7 @@ final class DoubleVector512 extends DoubleVector { } // Shuffle - + @ValueBased static final class DoubleShuffle512 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -845,7 +846,7 @@ final class DoubleVector512 extends DoubleVector { @Override @ForceInline public DoubleVector512 toVector() { - return (DoubleVector512) toBitsVector().castShape(vspecies(), 0); + return (DoubleVector512) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -856,7 +857,7 @@ final class DoubleVector512 extends DoubleVector { @Override LongVector512 toBitsVector0() { - return ((LongVector512) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector512) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -930,7 +931,7 @@ final class DoubleVector512 extends DoubleVector { @ForceInline public final DoubleMask512 laneIsValid() { return (DoubleMask512) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -938,7 +939,7 @@ final class DoubleVector512 extends DoubleVector { public final DoubleShuffle512 rearrange(VectorShuffle shuffle) { DoubleShuffle512 concreteShuffle = (DoubleShuffle512) shuffle; return (DoubleShuffle512) toBitsVector().rearrange(concreteShuffle.cast(LongVector.SPECIES_512)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, 
false); } @ForceInline @@ -951,7 +952,7 @@ final class DoubleVector512 extends DoubleVector { v = (LongVector512) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (DoubleShuffle512) v.toShuffle(vspecies(), false); + return (DoubleShuffle512) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector64.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector64.java index 7e1a8cf768d..5583cff80e1 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector64.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector64.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class DoubleVector64 extends DoubleVector { static final DoubleSpecies VSPECIES = (DoubleSpecies) DoubleVector.SPECIES_64; @@ -358,7 +359,7 @@ final class DoubleVector64 extends DoubleVector { @Override @ForceInline public final DoubleShuffle64 toShuffle() { - return (DoubleShuffle64) toShuffle(vspecies(), false); + return (DoubleShuffle64) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -557,7 +558,7 @@ final class DoubleVector64 extends DoubleVector { } // Mask - + @ValueBased static final class DoubleMask64 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -605,7 +606,7 @@ final class DoubleVector64 extends DoubleVector { @Override DoubleMask64 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -615,7 +616,7 @@ final class DoubleVector64 extends DoubleVector { @Override DoubleMask64 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((DoubleMask64)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -765,16 +766,16 @@ final class DoubleVector64 extends DoubleVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, DoubleMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((DoubleMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((DoubleMask64)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, DoubleMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((DoubleMask64)m).getBits())); + this, 
VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((DoubleMask64)m).getBits())); } @ForceInline @@ -782,7 +783,7 @@ final class DoubleVector64 extends DoubleVector { static DoubleMask64 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(DoubleMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final DoubleMask64 TRUE_MASK = new DoubleMask64(true); private static final DoubleMask64 FALSE_MASK = new DoubleMask64(false); @@ -790,7 +791,7 @@ final class DoubleVector64 extends DoubleVector { } // Shuffle - + @ValueBased static final class DoubleShuffle64 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -831,7 +832,7 @@ final class DoubleVector64 extends DoubleVector { @Override @ForceInline public DoubleVector64 toVector() { - return (DoubleVector64) toBitsVector().castShape(vspecies(), 0); + return (DoubleVector64) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -842,7 +843,7 @@ final class DoubleVector64 extends DoubleVector { @Override LongVector64 toBitsVector0() { - return ((LongVector64) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector64) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -916,7 +917,7 @@ final class DoubleVector64 extends DoubleVector { @ForceInline public final DoubleMask64 laneIsValid() { return (DoubleMask64) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -924,7 +925,7 @@ final class DoubleVector64 extends DoubleVector { public final DoubleShuffle64 rearrange(VectorShuffle shuffle) { DoubleShuffle64 concreteShuffle = (DoubleShuffle64) shuffle; return (DoubleShuffle64) toBitsVector().rearrange(concreteShuffle.cast(LongVector.SPECIES_64)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ 
-937,7 +938,7 @@ final class DoubleVector64 extends DoubleVector { v = (LongVector64) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (DoubleShuffle64) v.toShuffle(vspecies(), false); + return (DoubleShuffle64) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVectorMax.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVectorMax.java index 46c090e066e..41272a5a5e5 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVectorMax.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVectorMax.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class DoubleVectorMax extends DoubleVector { static final DoubleSpecies VSPECIES = (DoubleSpecies) DoubleVector.SPECIES_MAX; @@ -358,7 +359,7 @@ final class DoubleVectorMax extends DoubleVector { @Override @ForceInline public final DoubleShuffleMax toShuffle() { - return (DoubleShuffleMax) toShuffle(vspecies(), false); + return (DoubleShuffleMax) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -556,7 +557,7 @@ final class DoubleVectorMax extends DoubleVector { } // Mask - + @ValueBased static final class DoubleMaskMax extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -604,7 +605,7 @@ final class DoubleVectorMax extends DoubleVector { @Override DoubleMaskMax uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -614,7 +615,7 @@ final class DoubleVectorMax extends DoubleVector { @Override DoubleMaskMax bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((DoubleMaskMax)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -764,16 +765,16 @@ final class DoubleVectorMax extends DoubleVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, DoubleMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((DoubleMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((DoubleMaskMax)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, DoubleMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((DoubleMaskMax)m).getBits())); + this, 
VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((DoubleMaskMax)m).getBits())); } @ForceInline @@ -781,7 +782,7 @@ final class DoubleVectorMax extends DoubleVector { static DoubleMaskMax maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(DoubleMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final DoubleMaskMax TRUE_MASK = new DoubleMaskMax(true); private static final DoubleMaskMax FALSE_MASK = new DoubleMaskMax(false); @@ -789,7 +790,7 @@ final class DoubleVectorMax extends DoubleVector { } // Shuffle - + @ValueBased static final class DoubleShuffleMax extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -830,7 +831,7 @@ final class DoubleVectorMax extends DoubleVector { @Override @ForceInline public DoubleVectorMax toVector() { - return (DoubleVectorMax) toBitsVector().castShape(vspecies(), 0); + return (DoubleVectorMax) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -841,7 +842,7 @@ final class DoubleVectorMax extends DoubleVector { @Override LongVectorMax toBitsVector0() { - return ((LongVectorMax) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVectorMax) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -915,7 +916,7 @@ final class DoubleVectorMax extends DoubleVector { @ForceInline public final DoubleMaskMax laneIsValid() { return (DoubleMaskMax) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -923,7 +924,7 @@ final class DoubleVectorMax extends DoubleVector { public final DoubleShuffleMax rearrange(VectorShuffle shuffle) { DoubleShuffleMax concreteShuffle = (DoubleShuffleMax) shuffle; return (DoubleShuffleMax) toBitsVector().rearrange(concreteShuffle.cast(LongVector.SPECIES_MAX)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, 
false); } @ForceInline @@ -936,7 +937,7 @@ final class DoubleVectorMax extends DoubleVector { v = (LongVectorMax) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (DoubleShuffleMax) v.toShuffle(vspecies(), false); + return (DoubleShuffleMax) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16Consts.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16Consts.java index 48c4d2199b1..b70b11b0a49 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16Consts.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16Consts.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import static jdk.incubator.vector.Float16.SIZE; * {@code Float16} type. */ -class Float16Consts { +final class Float16Consts { /** * Don't let anyone instantiate this class. */ diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java index 5862a295fa3..cdf2532e4d9 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java @@ -49,7 +49,8 @@ import static jdk.incubator.vector.VectorOperators.*; * {@code float} values. 
*/ @SuppressWarnings("cast") // warning: redundant cast -public abstract class FloatVector extends AbstractVector { +public abstract sealed class FloatVector extends AbstractVector + permits FloatVector64, FloatVector128, FloatVector256, FloatVector512, FloatVectorMax { FloatVector(float[] vec) { super(vec); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector128.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector128.java index 1e3867e84fc..24888e966da 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector128.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector128.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class FloatVector128 extends FloatVector { static final FloatSpecies VSPECIES = (FloatSpecies) FloatVector.SPECIES_128; @@ -358,7 +358,7 @@ final class FloatVector128 extends FloatVector { @Override @ForceInline public final FloatShuffle128 toShuffle() { - return (FloatShuffle128) toShuffle(vspecies(), false); + return (FloatShuffle128) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -563,7 +563,7 @@ final class FloatVector128 extends FloatVector { } // Mask - + @ValueBased static final class FloatMask128 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -611,7 +611,7 @@ final class FloatVector128 extends FloatVector { @Override FloatMask128 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -621,7 +621,7 @@ final class FloatVector128 extends FloatVector { @Override FloatMask128 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((FloatMask128)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -771,16 +771,16 @@ final class FloatVector128 extends FloatVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, FloatMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((FloatMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((FloatMask128)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, FloatMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((FloatMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((FloatMask128)m).getBits())); } @ForceInline @@ -788,7 +788,7 @@ final class FloatVector128 extends FloatVector { static FloatMask128 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(FloatMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final FloatMask128 TRUE_MASK = new FloatMask128(true); private static final FloatMask128 FALSE_MASK = new FloatMask128(false); @@ -796,7 +796,7 @@ final class FloatVector128 extends FloatVector { } // Shuffle - + @ValueBased static final class FloatShuffle128 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -837,7 +837,7 @@ final class FloatVector128 extends FloatVector { @Override @ForceInline public FloatVector128 toVector() { - return (FloatVector128) toBitsVector().castShape(vspecies(), 0); + return (FloatVector128) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -848,7 +848,7 @@ final class FloatVector128 extends FloatVector { @Override IntVector128 toBitsVector0() { - return ((IntVector128) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector128) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -873,7 +873,7 @@ final class FloatVector128 extends FloatVector { @ForceInline public final FloatMask128 laneIsValid() { return (FloatMask128) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -881,7 +881,7 @@ final class FloatVector128 extends FloatVector { public final FloatShuffle128 rearrange(VectorShuffle shuffle) { FloatShuffle128 concreteShuffle = (FloatShuffle128) shuffle; return (FloatShuffle128) toBitsVector().rearrange(concreteShuffle.cast(IntVector.SPECIES_128)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -894,7 +894,7 @@ final class 
FloatVector128 extends FloatVector { v = (IntVector128) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (FloatShuffle128) v.toShuffle(vspecies(), false); + return (FloatShuffle128) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector256.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector256.java index f267025972d..ecbd80046f4 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector256.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector256.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class FloatVector256 extends FloatVector { static final FloatSpecies VSPECIES = (FloatSpecies) FloatVector.SPECIES_256; @@ -358,7 +358,7 @@ final class FloatVector256 extends FloatVector { @Override @ForceInline public final FloatShuffle256 toShuffle() { - return (FloatShuffle256) toShuffle(vspecies(), false); + return (FloatShuffle256) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -571,7 +571,7 @@ final class FloatVector256 extends FloatVector { } // Mask - + @ValueBased static final class FloatMask256 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -619,7 +619,7 @@ final class FloatVector256 extends FloatVector { @Override FloatMask256 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -629,7 +629,7 @@ final class FloatVector256 extends FloatVector { @Override FloatMask256 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((FloatMask256)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -779,16 +779,16 @@ final class FloatVector256 extends FloatVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, FloatMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((FloatMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((FloatMask256)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, FloatMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((FloatMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((FloatMask256)m).getBits())); } @ForceInline @@ -796,7 +796,7 @@ final class FloatVector256 extends FloatVector { static FloatMask256 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(FloatMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final FloatMask256 TRUE_MASK = new FloatMask256(true); private static final FloatMask256 FALSE_MASK = new FloatMask256(false); @@ -804,7 +804,7 @@ final class FloatVector256 extends FloatVector { } // Shuffle - + @ValueBased static final class FloatShuffle256 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -845,7 +845,7 @@ final class FloatVector256 extends FloatVector { @Override @ForceInline public FloatVector256 toVector() { - return (FloatVector256) toBitsVector().castShape(vspecies(), 0); + return (FloatVector256) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -856,7 +856,7 @@ final class FloatVector256 extends FloatVector { @Override IntVector256 toBitsVector0() { - return ((IntVector256) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector256) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -881,7 +881,7 @@ final class FloatVector256 extends FloatVector { @ForceInline public final FloatMask256 laneIsValid() { return (FloatMask256) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -889,7 +889,7 @@ final class FloatVector256 extends FloatVector { public final FloatShuffle256 rearrange(VectorShuffle shuffle) { FloatShuffle256 concreteShuffle = (FloatShuffle256) shuffle; return (FloatShuffle256) toBitsVector().rearrange(concreteShuffle.cast(IntVector.SPECIES_256)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -902,7 +902,7 @@ final class 
FloatVector256 extends FloatVector { v = (IntVector256) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (FloatShuffle256) v.toShuffle(vspecies(), false); + return (FloatShuffle256) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector512.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector512.java index 439e26f0d89..b5a934dd90c 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector512.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector512.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class FloatVector512 extends FloatVector { static final FloatSpecies VSPECIES = (FloatSpecies) FloatVector.SPECIES_512; @@ -358,7 +358,7 @@ final class FloatVector512 extends FloatVector { @Override @ForceInline public final FloatShuffle512 toShuffle() { - return (FloatShuffle512) toShuffle(vspecies(), false); + return (FloatShuffle512) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -587,7 +587,7 @@ final class FloatVector512 extends FloatVector { } // Mask - + @ValueBased static final class FloatMask512 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -635,7 +635,7 @@ final class FloatVector512 extends FloatVector { @Override FloatMask512 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -645,7 +645,7 @@ final class FloatVector512 extends FloatVector { @Override FloatMask512 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((FloatMask512)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -795,16 +795,16 @@ final class FloatVector512 extends FloatVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, FloatMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((FloatMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((FloatMask512)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, FloatMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((FloatMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((FloatMask512)m).getBits())); } @ForceInline @@ -812,7 +812,7 @@ final class FloatVector512 extends FloatVector { static FloatMask512 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(FloatMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final FloatMask512 TRUE_MASK = new FloatMask512(true); private static final FloatMask512 FALSE_MASK = new FloatMask512(false); @@ -820,7 +820,7 @@ final class FloatVector512 extends FloatVector { } // Shuffle - + @ValueBased static final class FloatShuffle512 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -861,7 +861,7 @@ final class FloatVector512 extends FloatVector { @Override @ForceInline public FloatVector512 toVector() { - return (FloatVector512) toBitsVector().castShape(vspecies(), 0); + return (FloatVector512) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -872,7 +872,7 @@ final class FloatVector512 extends FloatVector { @Override IntVector512 toBitsVector0() { - return ((IntVector512) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector512) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -897,7 +897,7 @@ final class FloatVector512 extends FloatVector { @ForceInline public final FloatMask512 laneIsValid() { return (FloatMask512) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -905,7 +905,7 @@ final class FloatVector512 extends FloatVector { public final FloatShuffle512 rearrange(VectorShuffle shuffle) { FloatShuffle512 concreteShuffle = (FloatShuffle512) shuffle; return (FloatShuffle512) toBitsVector().rearrange(concreteShuffle.cast(IntVector.SPECIES_512)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -918,7 +918,7 @@ final class 
FloatVector512 extends FloatVector { v = (IntVector512) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (FloatShuffle512) v.toShuffle(vspecies(), false); + return (FloatShuffle512) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector64.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector64.java index 9e81a52d27b..4d3118739ea 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector64.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector64.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class FloatVector64 extends FloatVector { static final FloatSpecies VSPECIES = (FloatSpecies) FloatVector.SPECIES_64; @@ -358,7 +358,7 @@ final class FloatVector64 extends FloatVector { @Override @ForceInline public final FloatShuffle64 toShuffle() { - return (FloatShuffle64) toShuffle(vspecies(), false); + return (FloatShuffle64) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -559,7 +559,7 @@ final class FloatVector64 extends FloatVector { } // Mask - + @ValueBased static final class FloatMask64 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -607,7 +607,7 @@ final class FloatVector64 extends FloatVector { @Override FloatMask64 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -617,7 +617,7 @@ final class FloatVector64 extends FloatVector { @Override FloatMask64 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((FloatMask64)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -767,16 +767,16 @@ final class FloatVector64 extends FloatVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, FloatMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((FloatMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((FloatMask64)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, FloatMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((FloatMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> 
allTrueHelper(((FloatMask64)m).getBits())); } @ForceInline @@ -784,7 +784,7 @@ final class FloatVector64 extends FloatVector { static FloatMask64 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(FloatMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final FloatMask64 TRUE_MASK = new FloatMask64(true); private static final FloatMask64 FALSE_MASK = new FloatMask64(false); @@ -792,7 +792,7 @@ final class FloatVector64 extends FloatVector { } // Shuffle - + @ValueBased static final class FloatShuffle64 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -833,7 +833,7 @@ final class FloatVector64 extends FloatVector { @Override @ForceInline public FloatVector64 toVector() { - return (FloatVector64) toBitsVector().castShape(vspecies(), 0); + return (FloatVector64) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -844,7 +844,7 @@ final class FloatVector64 extends FloatVector { @Override IntVector64 toBitsVector0() { - return ((IntVector64) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector64) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -869,7 +869,7 @@ final class FloatVector64 extends FloatVector { @ForceInline public final FloatMask64 laneIsValid() { return (FloatMask64) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -877,7 +877,7 @@ final class FloatVector64 extends FloatVector { public final FloatShuffle64 rearrange(VectorShuffle shuffle) { FloatShuffle64 concreteShuffle = (FloatShuffle64) shuffle; return (FloatShuffle64) toBitsVector().rearrange(concreteShuffle.cast(IntVector.SPECIES_64)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -890,7 +890,7 @@ final class FloatVector64 extends FloatVector { v = 
(IntVector64) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (FloatShuffle64) v.toShuffle(vspecies(), false); + return (FloatShuffle64) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVectorMax.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVectorMax.java index 4813f153544..f115a1c79b8 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVectorMax.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVectorMax.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class FloatVectorMax extends FloatVector { static final FloatSpecies VSPECIES = (FloatSpecies) FloatVector.SPECIES_MAX; @@ -358,7 +358,7 @@ final class FloatVectorMax extends FloatVector { @Override @ForceInline public final FloatShuffleMax toShuffle() { - return (FloatShuffleMax) toShuffle(vspecies(), false); + return (FloatShuffleMax) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -556,7 +556,7 @@ final class FloatVectorMax extends FloatVector { } // Mask - + @ValueBased static final class FloatMaskMax extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -604,7 +604,7 @@ final class FloatVectorMax extends FloatVector { @Override FloatMaskMax uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -614,7 +614,7 @@ final class FloatVectorMax extends FloatVector { @Override FloatMaskMax bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((FloatMaskMax)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -764,16 +764,16 @@ final class FloatVectorMax extends FloatVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, FloatMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((FloatMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((FloatMaskMax)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, FloatMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((FloatMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((FloatMaskMax)m).getBits())); } @ForceInline @@ -781,7 +781,7 @@ final class FloatVectorMax extends FloatVector { static FloatMaskMax maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(FloatMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final FloatMaskMax TRUE_MASK = new FloatMaskMax(true); private static final FloatMaskMax FALSE_MASK = new FloatMaskMax(false); @@ -789,7 +789,7 @@ final class FloatVectorMax extends FloatVector { } // Shuffle - + @ValueBased static final class FloatShuffleMax extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -830,7 +830,7 @@ final class FloatVectorMax extends FloatVector { @Override @ForceInline public FloatVectorMax toVector() { - return (FloatVectorMax) toBitsVector().castShape(vspecies(), 0); + return (FloatVectorMax) toBitsVector().castShape(VSPECIES, 0); } @Override @@ -841,7 +841,7 @@ final class FloatVectorMax extends FloatVector { @Override IntVectorMax toBitsVector0() { - return ((IntVectorMax) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVectorMax) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -866,7 +866,7 @@ final class FloatVectorMax extends FloatVector { @ForceInline public final FloatMaskMax laneIsValid() { return (FloatMaskMax) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -874,7 +874,7 @@ final class FloatVectorMax extends FloatVector { public final FloatShuffleMax rearrange(VectorShuffle shuffle) { FloatShuffleMax concreteShuffle = (FloatShuffleMax) shuffle; return (FloatShuffleMax) toBitsVector().rearrange(concreteShuffle.cast(IntVector.SPECIES_MAX)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -887,7 +887,7 @@ final class 
FloatVectorMax extends FloatVector { v = (IntVectorMax) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (FloatShuffleMax) v.toShuffle(vspecies(), false); + return (FloatShuffleMax) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java index 445c4dfb006..37b7e3eeae4 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java @@ -49,7 +49,8 @@ import static jdk.incubator.vector.VectorOperators.*; * {@code int} values. */ @SuppressWarnings("cast") // warning: redundant cast -public abstract class IntVector extends AbstractVector { +public abstract sealed class IntVector extends AbstractVector + permits IntVector64, IntVector128, IntVector256, IntVector512, IntVectorMax { IntVector(int[] vec) { super(vec); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector128.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector128.java index cc8f31a4bc2..f64328e2a1e 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector128.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector128.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- 
This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class IntVector128 extends IntVector { static final IntSpecies VSPECIES = (IntSpecies) IntVector.SPECIES_128; @@ -371,7 +371,7 @@ final class IntVector128 extends IntVector { @Override @ForceInline public final IntShuffle128 toShuffle() { - return (IntShuffle128) toShuffle(vspecies(), false); + return (IntShuffle128) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -574,7 +574,7 @@ final class IntVector128 extends IntVector { } // Mask - + @ValueBased static final class IntMask128 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -622,7 +622,7 @@ final class IntVector128 extends IntVector { @Override IntMask128 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -632,7 +632,7 @@ final class IntVector128 extends IntVector { @Override IntMask128 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((IntMask128)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -782,16 +782,16 @@ final class IntVector128 extends IntVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, IntMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((IntMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((IntMask128)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, IntMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((IntMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) 
-> allTrueHelper(((IntMask128)m).getBits())); } @ForceInline @@ -799,7 +799,7 @@ final class IntVector128 extends IntVector { static IntMask128 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(IntMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final IntMask128 TRUE_MASK = new IntMask128(true); private static final IntMask128 FALSE_MASK = new IntMask128(false); @@ -807,7 +807,7 @@ final class IntVector128 extends IntVector { } // Shuffle - + @ValueBased static final class IntShuffle128 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -859,7 +859,7 @@ final class IntVector128 extends IntVector { @Override IntVector128 toBitsVector0() { - return ((IntVector128) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector128) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -884,7 +884,7 @@ final class IntVector128 extends IntVector { @ForceInline public final IntMask128 laneIsValid() { return (IntMask128) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -892,7 +892,7 @@ final class IntVector128 extends IntVector { public final IntShuffle128 rearrange(VectorShuffle shuffle) { IntShuffle128 concreteShuffle = (IntShuffle128) shuffle; return (IntShuffle128) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -905,7 +905,7 @@ final class IntVector128 extends IntVector { v = (IntVector128) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (IntShuffle128) v.toShuffle(vspecies(), false); + return (IntShuffle128) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git 
a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector256.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector256.java index 0630cd958f2..58a1667d2ac 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector256.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector256.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class IntVector256 extends IntVector { static final IntSpecies VSPECIES = (IntSpecies) IntVector.SPECIES_256; @@ -371,7 +371,7 @@ final class IntVector256 extends IntVector { @Override @ForceInline public final IntShuffle256 toShuffle() { - return (IntShuffle256) toShuffle(vspecies(), false); + return (IntShuffle256) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -582,7 +582,7 @@ final class IntVector256 extends IntVector { } // Mask - + @ValueBased static final class IntMask256 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -630,7 +630,7 @@ final class IntVector256 extends IntVector { @Override IntMask256 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -640,7 +640,7 @@ final class IntVector256 extends 
IntVector { @Override IntMask256 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((IntMask256)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -790,16 +790,16 @@ final class IntVector256 extends IntVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, IntMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((IntMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((IntMask256)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, IntMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((IntMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((IntMask256)m).getBits())); } @ForceInline @@ -807,7 +807,7 @@ final class IntVector256 extends IntVector { static IntMask256 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(IntMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final IntMask256 TRUE_MASK = new IntMask256(true); private static final IntMask256 FALSE_MASK = new IntMask256(false); @@ -815,7 +815,7 @@ final class IntVector256 extends IntVector { } // Shuffle - + @ValueBased static final class IntShuffle256 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -867,7 +867,7 @@ final class IntVector256 extends IntVector { @Override IntVector256 toBitsVector0() { - return ((IntVector256) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector256) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -892,7 +892,7 @@ final class IntVector256 extends IntVector { @ForceInline public final IntMask256 laneIsValid() { return (IntMask256) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -900,7 +900,7 @@ final class IntVector256 extends IntVector { public final IntShuffle256 rearrange(VectorShuffle shuffle) { IntShuffle256 concreteShuffle = (IntShuffle256) shuffle; return (IntShuffle256) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -913,7 +913,7 @@ final class IntVector256 extends IntVector { v = (IntVector256) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (IntShuffle256) v.toShuffle(vspecies(), false); + return (IntShuffle256) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector512.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector512.java index 92eb5a0f2d2..ac48e589a05 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector512.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector512.java @@ -25,22 +25,22 @@ package 
jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class IntVector512 extends IntVector { static final IntSpecies VSPECIES = (IntSpecies) IntVector.SPECIES_512; @@ -371,7 +371,7 @@ final class IntVector512 extends IntVector { @Override @ForceInline public final IntShuffle512 toShuffle() { - return (IntShuffle512) toShuffle(vspecies(), false); + return (IntShuffle512) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -598,7 +598,7 @@ final class IntVector512 extends IntVector { } // Mask - + @ValueBased static final class IntMask512 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -646,7 +646,7 @@ final class IntVector512 extends IntVector { @Override IntMask512 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -656,7 +656,7 @@ final class IntVector512 extends IntVector { @Override IntMask512 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((IntMask512)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -806,16 +806,16 @@ final class IntVector512 extends IntVector { @ForceInline public boolean anyTrue() { return 
VectorSupport.test(BT_ne, IntMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((IntMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((IntMask512)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, IntMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((IntMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((IntMask512)m).getBits())); } @ForceInline @@ -823,7 +823,7 @@ final class IntVector512 extends IntVector { static IntMask512 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(IntMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final IntMask512 TRUE_MASK = new IntMask512(true); private static final IntMask512 FALSE_MASK = new IntMask512(false); @@ -831,7 +831,7 @@ final class IntVector512 extends IntVector { } // Shuffle - + @ValueBased static final class IntShuffle512 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -883,7 +883,7 @@ final class IntVector512 extends IntVector { @Override IntVector512 toBitsVector0() { - return ((IntVector512) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector512) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -908,7 +908,7 @@ final class IntVector512 extends IntVector { @ForceInline public final IntMask512 laneIsValid() { return (IntMask512) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -916,7 +916,7 @@ final class IntVector512 extends IntVector { public final IntShuffle512 rearrange(VectorShuffle shuffle) { IntShuffle512 concreteShuffle = (IntShuffle512) shuffle; return 
(IntShuffle512) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -929,7 +929,7 @@ final class IntVector512 extends IntVector { v = (IntVector512) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (IntShuffle512) v.toShuffle(vspecies(), false); + return (IntShuffle512) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector64.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector64.java index c3f92285034..25329aa81aa 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector64.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector64.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class IntVector64 extends IntVector { static final IntSpecies VSPECIES = (IntSpecies) IntVector.SPECIES_64; @@ -371,7 +371,7 @@ final class IntVector64 extends IntVector { @Override @ForceInline public final IntShuffle64 toShuffle() { - return (IntShuffle64) toShuffle(vspecies(), false); + return (IntShuffle64) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -570,7 +570,7 @@ final class IntVector64 extends IntVector { } // Mask - + @ValueBased static final class IntMask64 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -618,7 +618,7 @@ final class IntVector64 extends IntVector { @Override IntMask64 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -628,7 +628,7 @@ final class IntVector64 extends IntVector { @Override IntMask64 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((IntMask64)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -778,16 +778,16 @@ final class IntVector64 extends IntVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, IntMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((IntMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((IntMask64)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, IntMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((IntMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((IntMask64)m).getBits())); } @ForceInline @@ -795,7 
+795,7 @@ final class IntVector64 extends IntVector { static IntMask64 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(IntMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final IntMask64 TRUE_MASK = new IntMask64(true); private static final IntMask64 FALSE_MASK = new IntMask64(false); @@ -803,7 +803,7 @@ final class IntVector64 extends IntVector { } // Shuffle - + @ValueBased static final class IntShuffle64 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -855,7 +855,7 @@ final class IntVector64 extends IntVector { @Override IntVector64 toBitsVector0() { - return ((IntVector64) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVector64) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -880,7 +880,7 @@ final class IntVector64 extends IntVector { @ForceInline public final IntMask64 laneIsValid() { return (IntMask64) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -888,7 +888,7 @@ final class IntVector64 extends IntVector { public final IntShuffle64 rearrange(VectorShuffle shuffle) { IntShuffle64 concreteShuffle = (IntShuffle64) shuffle; return (IntShuffle64) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -901,7 +901,7 @@ final class IntVector64 extends IntVector { v = (IntVector64) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (IntShuffle64) v.toShuffle(vspecies(), false); + return (IntShuffle64) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVectorMax.java 
b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVectorMax.java index 8d3c251536c..348fda59381 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVectorMax.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVectorMax.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class IntVectorMax extends IntVector { static final IntSpecies VSPECIES = (IntSpecies) IntVector.SPECIES_MAX; @@ -371,7 +371,7 @@ final class IntVectorMax extends IntVector { @Override @ForceInline public final IntShuffleMax toShuffle() { - return (IntShuffleMax) toShuffle(vspecies(), false); + return (IntShuffleMax) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -568,7 +568,7 @@ final class IntVectorMax extends IntVector { } // Mask - + @ValueBased static final class IntMaskMax extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -616,7 +616,7 @@ final class IntVectorMax extends IntVector { @Override IntMaskMax uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -626,7 +626,7 @@ final class IntVectorMax extends IntVector { @Override IntMaskMax bOp(VectorMask m, MBinOp f) { - boolean[] res = new 
boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((IntMaskMax)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -776,16 +776,16 @@ final class IntVectorMax extends IntVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, IntMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((IntMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((IntMaskMax)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, IntMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((IntMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((IntMaskMax)m).getBits())); } @ForceInline @@ -793,7 +793,7 @@ final class IntVectorMax extends IntVector { static IntMaskMax maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(IntMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final IntMaskMax TRUE_MASK = new IntMaskMax(true); private static final IntMaskMax FALSE_MASK = new IntMaskMax(false); @@ -812,7 +812,7 @@ final class IntVectorMax extends IntVector { } // Shuffle - + @ValueBased static final class IntShuffleMax extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -864,7 +864,7 @@ final class IntVectorMax extends IntVector { @Override IntVectorMax toBitsVector0() { - return ((IntVectorMax) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((IntVectorMax) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -889,7 +889,7 @@ final class IntVectorMax extends IntVector { @ForceInline public final IntMaskMax laneIsValid() { return (IntMaskMax) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -897,7 +897,7 @@ final class IntVectorMax extends IntVector { public final IntShuffleMax rearrange(VectorShuffle shuffle) { IntShuffleMax concreteShuffle = (IntShuffleMax) shuffle; return (IntShuffleMax) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -910,7 +910,7 @@ final class IntVectorMax extends IntVector { v = (IntVectorMax) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (IntShuffleMax) v.toShuffle(vspecies(), false); + return (IntShuffleMax) v.toShuffle(VSPECIES, false); } private static int[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java index 7ba0af6c139..36300cf892b 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java @@ -49,7 +49,8 @@ import static 
jdk.incubator.vector.VectorOperators.*; * {@code long} values. */ @SuppressWarnings("cast") // warning: redundant cast -public abstract class LongVector extends AbstractVector { +public abstract sealed class LongVector extends AbstractVector + permits LongVector64, LongVector128, LongVector256, LongVector512, LongVectorMax { LongVector(long[] vec) { super(vec); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector128.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector128.java index f8dad12ff89..7ce60b2efe0 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector128.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector128.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class LongVector128 extends LongVector { static final LongSpecies VSPECIES = (LongSpecies) LongVector.SPECIES_128; @@ -366,7 +367,7 @@ final class LongVector128 extends LongVector { @Override @ForceInline public final LongShuffle128 toShuffle() { - return (LongShuffle128) toShuffle(vspecies(), false); + return (LongShuffle128) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -560,7 +561,7 @@ final class LongVector128 extends LongVector { } // Mask - + @ValueBased static final class LongMask128 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -608,7 +609,7 @@ final class LongVector128 extends LongVector { @Override LongMask128 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -618,7 +619,7 @@ final class LongVector128 extends LongVector { @Override LongMask128 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((LongMask128)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -768,16 +769,16 @@ final class LongVector128 extends LongVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, LongMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((LongMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((LongMask128)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, LongMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((LongMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> 
allTrueHelper(((LongMask128)m).getBits())); } @ForceInline @@ -785,7 +786,7 @@ final class LongVector128 extends LongVector { static LongMask128 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(LongMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final LongMask128 TRUE_MASK = new LongMask128(true); private static final LongMask128 FALSE_MASK = new LongMask128(false); @@ -793,7 +794,7 @@ final class LongVector128 extends LongVector { } // Shuffle - + @ValueBased static final class LongShuffle128 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -845,7 +846,7 @@ final class LongVector128 extends LongVector { @Override LongVector128 toBitsVector0() { - return ((LongVector128) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector128) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -919,7 +920,7 @@ final class LongVector128 extends LongVector { @ForceInline public final LongMask128 laneIsValid() { return (LongMask128) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -927,7 +928,7 @@ final class LongVector128 extends LongVector { public final LongShuffle128 rearrange(VectorShuffle shuffle) { LongShuffle128 concreteShuffle = (LongShuffle128) shuffle; return (LongShuffle128) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -940,7 +941,7 @@ final class LongVector128 extends LongVector { v = (LongVector128) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (LongShuffle128) v.toShuffle(vspecies(), false); + return (LongShuffle128) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git 
a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector256.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector256.java index 144e2c1c64d..110a54c547f 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector256.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector256.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class LongVector256 extends LongVector { static final LongSpecies VSPECIES = (LongSpecies) LongVector.SPECIES_256; @@ -366,7 +367,7 @@ final class LongVector256 extends LongVector { @Override @ForceInline public final LongShuffle256 toShuffle() { - return (LongShuffle256) toShuffle(vspecies(), false); + return (LongShuffle256) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -564,7 +565,7 @@ final class LongVector256 extends LongVector { } // Mask - + @ValueBased static final class LongMask256 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -612,7 +613,7 @@ final class LongVector256 extends LongVector { @Override LongMask256 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -622,7 +623,7 @@ final class LongVector256 extends LongVector { @Override LongMask256 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + 
boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((LongMask256)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -772,16 +773,16 @@ final class LongVector256 extends LongVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, LongMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((LongMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((LongMask256)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, LongMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((LongMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((LongMask256)m).getBits())); } @ForceInline @@ -789,7 +790,7 @@ final class LongVector256 extends LongVector { static LongMask256 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(LongMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final LongMask256 TRUE_MASK = new LongMask256(true); private static final LongMask256 FALSE_MASK = new LongMask256(false); @@ -797,7 +798,7 @@ final class LongVector256 extends LongVector { } // Shuffle - + @ValueBased static final class LongShuffle256 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -849,7 +850,7 @@ final class LongVector256 extends LongVector { @Override LongVector256 toBitsVector0() { - return ((LongVector256) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector256) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -923,7 +924,7 @@ final class LongVector256 extends LongVector { @ForceInline public final LongMask256 laneIsValid() { return (LongMask256) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -931,7 +932,7 @@ final class LongVector256 extends LongVector { public final LongShuffle256 rearrange(VectorShuffle shuffle) { LongShuffle256 concreteShuffle = (LongShuffle256) shuffle; return (LongShuffle256) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -944,7 +945,7 @@ final class LongVector256 extends LongVector { v = (LongVector256) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (LongShuffle256) v.toShuffle(vspecies(), false); + return (LongShuffle256) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector512.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector512.java index b49d0c7c147..3502f209c3b 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector512.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector512.java 
@@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class LongVector512 extends LongVector { static final LongSpecies VSPECIES = (LongSpecies) LongVector.SPECIES_512; @@ -366,7 +367,7 @@ final class LongVector512 extends LongVector { @Override @ForceInline public final LongShuffle512 toShuffle() { - return (LongShuffle512) toShuffle(vspecies(), false); + return (LongShuffle512) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -572,7 +573,7 @@ final class LongVector512 extends LongVector { } // Mask - + @ValueBased static final class LongMask512 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -620,7 +621,7 @@ final class LongVector512 extends LongVector { @Override LongMask512 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -630,7 +631,7 @@ final class LongVector512 extends LongVector { @Override LongMask512 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((LongMask512)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -780,16 +781,16 @@ final class LongVector512 extends LongVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, LongMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, 
vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((LongMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((LongMask512)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, LongMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((LongMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((LongMask512)m).getBits())); } @ForceInline @@ -797,7 +798,7 @@ final class LongVector512 extends LongVector { static LongMask512 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(LongMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final LongMask512 TRUE_MASK = new LongMask512(true); private static final LongMask512 FALSE_MASK = new LongMask512(false); @@ -805,7 +806,7 @@ final class LongVector512 extends LongVector { } // Shuffle - + @ValueBased static final class LongShuffle512 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -857,7 +858,7 @@ final class LongVector512 extends LongVector { @Override LongVector512 toBitsVector0() { - return ((LongVector512) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector512) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -931,7 +932,7 @@ final class LongVector512 extends LongVector { @ForceInline public final LongMask512 laneIsValid() { return (LongMask512) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -939,7 +940,7 @@ final class LongVector512 extends LongVector { public final LongShuffle512 rearrange(VectorShuffle shuffle) { LongShuffle512 concreteShuffle = (LongShuffle512) shuffle; return (LongShuffle512) 
toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -952,7 +953,7 @@ final class LongVector512 extends LongVector { v = (LongVector512) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (LongShuffle512) v.toShuffle(vspecies(), false); + return (LongShuffle512) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector64.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector64.java index 5e8451695bc..2a2fe4329a8 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector64.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector64.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class LongVector64 extends LongVector { static final LongSpecies VSPECIES = (LongSpecies) LongVector.SPECIES_64; @@ -366,7 +367,7 @@ final class LongVector64 extends LongVector { @Override @ForceInline public final LongShuffle64 toShuffle() { - return (LongShuffle64) toShuffle(vspecies(), false); + return (LongShuffle64) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -558,7 +559,7 @@ final class LongVector64 extends LongVector { } // Mask - + @ValueBased static final class LongMask64 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -606,7 +607,7 @@ final class LongVector64 extends LongVector { @Override LongMask64 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -616,7 +617,7 @@ final class LongVector64 extends LongVector { @Override LongMask64 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((LongMask64)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -766,16 +767,16 @@ final class LongVector64 extends LongVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, LongMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((LongMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((LongMask64)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, LongMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((LongMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> 
allTrueHelper(((LongMask64)m).getBits())); } @ForceInline @@ -783,7 +784,7 @@ final class LongVector64 extends LongVector { static LongMask64 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(LongMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final LongMask64 TRUE_MASK = new LongMask64(true); private static final LongMask64 FALSE_MASK = new LongMask64(false); @@ -791,7 +792,7 @@ final class LongVector64 extends LongVector { } // Shuffle - + @ValueBased static final class LongShuffle64 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -843,7 +844,7 @@ final class LongVector64 extends LongVector { @Override LongVector64 toBitsVector0() { - return ((LongVector64) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVector64) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -917,7 +918,7 @@ final class LongVector64 extends LongVector { @ForceInline public final LongMask64 laneIsValid() { return (LongMask64) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -925,7 +926,7 @@ final class LongVector64 extends LongVector { public final LongShuffle64 rearrange(VectorShuffle shuffle) { LongShuffle64 concreteShuffle = (LongShuffle64) shuffle; return (LongShuffle64) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -938,7 +939,7 @@ final class LongVector64 extends LongVector { v = (LongVector64) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (LongShuffle64) v.toShuffle(vspecies(), false); + return (LongShuffle64) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git 
a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVectorMax.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVectorMax.java index 3469da8f2f4..157c58e20e8 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVectorMax.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVectorMax.java @@ -31,16 +31,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class LongVectorMax extends LongVector { static final LongSpecies VSPECIES = (LongSpecies) LongVector.SPECIES_MAX; @@ -366,7 +367,7 @@ final class LongVectorMax extends LongVector { @Override @ForceInline public final LongShuffleMax toShuffle() { - return (LongShuffleMax) toShuffle(vspecies(), false); + return (LongShuffleMax) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -558,7 +559,7 @@ final class LongVectorMax extends LongVector { } // Mask - + @ValueBased static final class LongMaskMax extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -606,7 +607,7 @@ final class LongVectorMax extends LongVector { @Override LongMaskMax uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -616,7 +617,7 @@ final class LongVectorMax extends LongVector { @Override LongMaskMax bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + 
boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((LongMaskMax)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -766,16 +767,16 @@ final class LongVectorMax extends LongVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, LongMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((LongMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((LongMaskMax)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, LongMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((LongMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((LongMaskMax)m).getBits())); } @ForceInline @@ -783,7 +784,7 @@ final class LongVectorMax extends LongVector { static LongMaskMax maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(LongMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final LongMaskMax TRUE_MASK = new LongMaskMax(true); private static final LongMaskMax FALSE_MASK = new LongMaskMax(false); @@ -791,7 +792,7 @@ final class LongVectorMax extends LongVector { } // Shuffle - + @ValueBased static final class LongShuffleMax extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -843,7 +844,7 @@ final class LongVectorMax extends LongVector { @Override LongVectorMax toBitsVector0() { - return ((LongVectorMax) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((LongVectorMax) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -917,7 +918,7 @@ final class LongVectorMax extends LongVector { @ForceInline public final LongMaskMax laneIsValid() { return (LongMaskMax) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -925,7 +926,7 @@ final class LongVectorMax extends LongVector { public final LongShuffleMax rearrange(VectorShuffle shuffle) { LongShuffleMax concreteShuffle = (LongShuffleMax) shuffle; return (LongShuffleMax) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -938,7 +939,7 @@ final class LongVectorMax extends LongVector { v = (LongVectorMax) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (LongShuffleMax) v.toShuffle(vspecies(), false); + return (LongShuffleMax) v.toShuffle(VSPECIES, false); } private static long[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java index 7ba465706e8..21bc80a12bc 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java @@ -49,7 
+49,8 @@ import static jdk.incubator.vector.VectorOperators.*; * {@code short} values. */ @SuppressWarnings("cast") // warning: redundant cast -public abstract class ShortVector extends AbstractVector { +public abstract sealed class ShortVector extends AbstractVector + permits ShortVector64, ShortVector128, ShortVector256, ShortVector512, ShortVectorMax { ShortVector(short[] vec) { super(vec); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector128.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector128.java index e989cdbdbea..22bbfce0928 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector128.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector128.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ShortVector128 extends ShortVector { static final ShortSpecies VSPECIES = (ShortSpecies) ShortVector.SPECIES_128; @@ -371,7 +371,7 @@ final class ShortVector128 extends ShortVector { @Override @ForceInline public final ShortShuffle128 toShuffle() { - return (ShortShuffle128) toShuffle(vspecies(), false); + return (ShortShuffle128) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -582,7 +582,7 @@ final class ShortVector128 extends ShortVector { } // Mask - + @ValueBased static final class ShortMask128 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -630,7 +630,7 @@ final class ShortVector128 extends ShortVector { @Override ShortMask128 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -640,7 +640,7 @@ final class ShortVector128 extends ShortVector { @Override ShortMask128 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ShortMask128)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -790,16 +790,16 @@ final class ShortVector128 extends ShortVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ShortMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ShortMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ShortMask128)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ShortMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ShortMask128)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((ShortMask128)m).getBits())); } @ForceInline @@ -807,7 +807,7 @@ final class ShortVector128 extends ShortVector { static ShortMask128 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ShortMask128.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ShortMask128 TRUE_MASK = new ShortMask128(true); private static final ShortMask128 FALSE_MASK = new ShortMask128(false); @@ -815,7 +815,7 @@ final class ShortVector128 extends ShortVector { } // Shuffle - + @ValueBased static final class ShortShuffle128 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -867,7 +867,7 @@ final class ShortVector128 extends ShortVector { @Override ShortVector128 toBitsVector0() { - return ((ShortVector128) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ShortVector128) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -906,7 +906,7 @@ final class ShortVector128 extends ShortVector { @ForceInline public final ShortMask128 laneIsValid() { return (ShortMask128) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -914,7 +914,7 @@ final class ShortVector128 extends ShortVector { public final ShortShuffle128 rearrange(VectorShuffle shuffle) { ShortShuffle128 concreteShuffle = (ShortShuffle128) shuffle; return (ShortShuffle128) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -927,7 +927,7 @@ final class ShortVector128 extends ShortVector { v = (ShortVector128) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ShortShuffle128) v.toShuffle(vspecies(), false); + return (ShortShuffle128) v.toShuffle(VSPECIES, false); } private static short[] prepare(int[] 
indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector256.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector256.java index c74188e22f5..6011695bf54 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector256.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector256.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ShortVector256 extends ShortVector { static final ShortSpecies VSPECIES = (ShortSpecies) ShortVector.SPECIES_256; @@ -371,7 +371,7 @@ final class ShortVector256 extends ShortVector { @Override @ForceInline public final ShortShuffle256 toShuffle() { - return (ShortShuffle256) toShuffle(vspecies(), false); + return (ShortShuffle256) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -598,7 +598,7 @@ final class ShortVector256 extends ShortVector { } // Mask - + @ValueBased static final class ShortMask256 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -646,7 +646,7 @@ final class ShortVector256 extends ShortVector { @Override ShortMask256 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -656,7 +656,7 @@ final class ShortVector256 extends ShortVector { @Override ShortMask256 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ShortMask256)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -806,16 +806,16 @@ final class ShortVector256 extends ShortVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ShortMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ShortMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ShortMask256)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ShortMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ShortMask256)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((ShortMask256)m).getBits())); } @ForceInline @@ -823,7 +823,7 @@ final class ShortVector256 extends ShortVector { static ShortMask256 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ShortMask256.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ShortMask256 TRUE_MASK = new ShortMask256(true); private static final ShortMask256 FALSE_MASK = new ShortMask256(false); @@ -831,7 +831,7 @@ final class ShortVector256 extends ShortVector { } // Shuffle - + @ValueBased static final class ShortShuffle256 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -883,7 +883,7 @@ final class ShortVector256 extends ShortVector { @Override ShortVector256 toBitsVector0() { - return ((ShortVector256) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ShortVector256) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -922,7 +922,7 @@ final class ShortVector256 extends ShortVector { @ForceInline public final ShortMask256 laneIsValid() { return (ShortMask256) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -930,7 +930,7 @@ final class ShortVector256 extends ShortVector { public final ShortShuffle256 rearrange(VectorShuffle shuffle) { ShortShuffle256 concreteShuffle = (ShortShuffle256) shuffle; return (ShortShuffle256) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -943,7 +943,7 @@ final class ShortVector256 extends ShortVector { v = (ShortVector256) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ShortShuffle256) v.toShuffle(vspecies(), false); + return (ShortShuffle256) v.toShuffle(VSPECIES, false); } private static short[] prepare(int[] 
indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector512.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector512.java index 46b5d652200..e6101d2e6be 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector512.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector512.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ShortVector512 extends ShortVector { static final ShortSpecies VSPECIES = (ShortSpecies) ShortVector.SPECIES_512; @@ -371,7 +371,7 @@ final class ShortVector512 extends ShortVector { @Override @ForceInline public final ShortShuffle512 toShuffle() { - return (ShortShuffle512) toShuffle(vspecies(), false); + return (ShortShuffle512) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -630,7 +630,7 @@ final class ShortVector512 extends ShortVector { } // Mask - + @ValueBased static final class ShortMask512 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -678,7 +678,7 @@ final class ShortVector512 extends ShortVector { @Override ShortMask512 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -688,7 +688,7 @@ final class ShortVector512 extends ShortVector { @Override ShortMask512 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ShortMask512)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -838,16 +838,16 @@ final class ShortVector512 extends ShortVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ShortMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ShortMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ShortMask512)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ShortMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ShortMask512)m).getBits())); + this, VSPECIES.maskAll(true), + (m, 
_) -> allTrueHelper(((ShortMask512)m).getBits())); } @ForceInline @@ -855,7 +855,7 @@ final class ShortVector512 extends ShortVector { static ShortMask512 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ShortMask512.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ShortMask512 TRUE_MASK = new ShortMask512(true); private static final ShortMask512 FALSE_MASK = new ShortMask512(false); @@ -863,7 +863,7 @@ final class ShortVector512 extends ShortVector { } // Shuffle - + @ValueBased static final class ShortShuffle512 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -915,7 +915,7 @@ final class ShortVector512 extends ShortVector { @Override ShortVector512 toBitsVector0() { - return ((ShortVector512) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ShortVector512) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -954,7 +954,7 @@ final class ShortVector512 extends ShortVector { @ForceInline public final ShortMask512 laneIsValid() { return (ShortMask512) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -962,7 +962,7 @@ final class ShortVector512 extends ShortVector { public final ShortShuffle512 rearrange(VectorShuffle shuffle) { ShortShuffle512 concreteShuffle = (ShortShuffle512) shuffle; return (ShortShuffle512) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -975,7 +975,7 @@ final class ShortVector512 extends ShortVector { v = (ShortVector512) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ShortShuffle512) v.toShuffle(vspecies(), false); + return (ShortShuffle512) v.toShuffle(VSPECIES, false); } private static short[] prepare(int[] 
indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector64.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector64.java index 66ff3efe522..31af959b4a8 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector64.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector64.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! 
-- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ShortVector64 extends ShortVector { static final ShortSpecies VSPECIES = (ShortSpecies) ShortVector.SPECIES_64; @@ -371,7 +371,7 @@ final class ShortVector64 extends ShortVector { @Override @ForceInline public final ShortShuffle64 toShuffle() { - return (ShortShuffle64) toShuffle(vspecies(), false); + return (ShortShuffle64) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -574,7 +574,7 @@ final class ShortVector64 extends ShortVector { } // Mask - + @ValueBased static final class ShortMask64 extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -622,7 +622,7 @@ final class ShortVector64 extends ShortVector { @Override ShortMask64 uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -632,7 +632,7 @@ final class ShortVector64 extends ShortVector { @Override ShortMask64 bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ShortMask64)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -782,16 +782,16 @@ final class ShortVector64 extends ShortVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ShortMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ShortMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ShortMask64)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ShortMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ShortMask64)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> 
allTrueHelper(((ShortMask64)m).getBits())); } @ForceInline @@ -799,7 +799,7 @@ final class ShortVector64 extends ShortVector { static ShortMask64 maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ShortMask64.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); } private static final ShortMask64 TRUE_MASK = new ShortMask64(true); private static final ShortMask64 FALSE_MASK = new ShortMask64(false); @@ -807,7 +807,7 @@ final class ShortVector64 extends ShortVector { } // Shuffle - + @ValueBased static final class ShortShuffle64 extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -859,7 +859,7 @@ final class ShortVector64 extends ShortVector { @Override ShortVector64 toBitsVector0() { - return ((ShortVector64) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ShortVector64) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -898,7 +898,7 @@ final class ShortVector64 extends ShortVector { @ForceInline public final ShortMask64 laneIsValid() { return (ShortMask64) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -906,7 +906,7 @@ final class ShortVector64 extends ShortVector { public final ShortShuffle64 rearrange(VectorShuffle shuffle) { ShortShuffle64 concreteShuffle = (ShortShuffle64) shuffle; return (ShortShuffle64) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -919,7 +919,7 @@ final class ShortVector64 extends ShortVector { v = (ShortVector64) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ShortShuffle64) v.toShuffle(vspecies(), false); + return (ShortShuffle64) v.toShuffle(VSPECIES, false); } private static short[] prepare(int[] indices, int offset) { diff 
--git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVectorMax.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVectorMax.java index b9a9b85126b..fe0359c4711 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVectorMax.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVectorMax.java @@ -25,22 +25,22 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; // -- This file was mechanically generated: Do not edit! -- // @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class ShortVectorMax extends ShortVector { static final ShortSpecies VSPECIES = (ShortSpecies) ShortVector.SPECIES_MAX; @@ -371,7 +371,7 @@ final class ShortVectorMax extends ShortVector { @Override @ForceInline public final ShortShuffleMax toShuffle() { - return (ShortShuffleMax) toShuffle(vspecies(), false); + return (ShortShuffleMax) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -568,7 +568,7 @@ final class ShortVectorMax extends ShortVector { } // Mask - + @ValueBased static final class ShortMaskMax extends AbstractMask { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -616,7 +616,7 @@ final class ShortVectorMax extends ShortVector { @Override ShortMaskMax uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -626,7 
+626,7 @@ final class ShortVectorMax extends ShortVector { @Override ShortMaskMax bOp(VectorMask m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = ((ShortMaskMax)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -776,16 +776,16 @@ final class ShortVectorMax extends ShortVector { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, ShortMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper(((ShortMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper(((ShortMaskMax)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, ShortMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper(((ShortMaskMax)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper(((ShortMaskMax)m).getBits())); } @ForceInline @@ -793,7 +793,7 @@ final class ShortVectorMax extends ShortVector { static ShortMaskMax maskAll(boolean bit) { return VectorSupport.fromBitsCoerced(ShortMaskMax.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final ShortMaskMax TRUE_MASK = new ShortMaskMax(true); private static final ShortMaskMax FALSE_MASK = new ShortMaskMax(false); @@ -801,7 +801,7 @@ final class ShortVectorMax extends ShortVector { } // Shuffle - + @ValueBased static final class ShortShuffleMax extends AbstractShuffle { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -853,7 +853,7 @@ final class ShortVectorMax extends ShortVector { @Override ShortVectorMax toBitsVector0() { - return ((ShortVectorMax) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return ((ShortVectorMax) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -892,7 +892,7 @@ final class ShortVectorMax extends ShortVector { @ForceInline public final ShortMaskMax laneIsValid() { return (ShortMaskMax) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -900,7 +900,7 @@ final class ShortVectorMax extends ShortVector { public final ShortShuffleMax rearrange(VectorShuffle shuffle) { ShortShuffleMax concreteShuffle = (ShortShuffleMax) shuffle; return (ShortShuffleMax) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); } @ForceInline @@ -913,7 +913,7 @@ final class ShortVectorMax extends ShortVector { v = (ShortVectorMax) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return (ShortShuffleMax) v.toShuffle(vspecies(), false); + return (ShortShuffleMax) v.toShuffle(VSPECIES, false); } private static short[] prepare(int[] indices, int offset) { diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Util.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Util.java index 8562d4b5d7a..133195fa54d 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Util.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Util.java @@ -1,5 
+1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,7 @@ */ package jdk.incubator.vector; -/*package-private*/ class Util { +/*package-private*/ final class Util { public static void requires(boolean cond, String message) { if (!cond) { throw new InternalError(message); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Vector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Vector.java index 68b4a35067c..85b3bbca269 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Vector.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Vector.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1170,9 +1170,10 @@ import java.util.Arrays; * @param the boxed version of {@code ETYPE}, * the element type of a vector * + * @sealedGraph */ @SuppressWarnings("exports") -public abstract class Vector extends jdk.internal.vm.vector.VectorSupport.Vector { +public abstract sealed class Vector extends jdk.internal.vm.vector.VectorSupport.Vector permits AbstractVector { // This type is sealed within its package. // Users cannot roll their own vector types. 
diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorIntrinsics.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorIntrinsics.java index 266a843083a..f0115371d48 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorIntrinsics.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorIntrinsics.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ import jdk.internal.vm.annotation.ForceInline; import java.util.Objects; -/*non-public*/ class VectorIntrinsics { +/*non-public*/ final class VectorIntrinsics { static final int VECTOR_ACCESS_OOB_CHECK = Integer.getInteger("jdk.incubator.vector.VECTOR_ACCESS_OOB_CHECK", 2); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMask.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMask.java index 13ee9e27e0d..607b194946b 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMask.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMask.java @@ -131,7 +131,7 @@ import java.util.Objects; * the element type of a vector */ @SuppressWarnings("exports") -public abstract class VectorMask extends jdk.internal.vm.vector.VectorSupport.VectorMask { +public abstract sealed class VectorMask extends jdk.internal.vm.vector.VectorSupport.VectorMask permits AbstractMask { VectorMask(boolean[] bits) { super(bits); } /** diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMathLibrary.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMathLibrary.java index 1c1cfcc78c7..59697733d86 100644 --- 
a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMathLibrary.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorMathLibrary.java @@ -31,6 +31,7 @@ import jdk.internal.vm.vector.VectorSupport; import java.lang.foreign.MemorySegment; import java.lang.foreign.SymbolLookup; +import java.util.Locale; import java.util.function.IntFunction; import static jdk.incubator.vector.Util.requires; @@ -42,7 +43,7 @@ import static jdk.internal.vm.vector.Utils.debug; * A wrapper for native vector math libraries bundled with the JDK (SVML and SLEEF). * Binds vector operations to native implementations provided by the libraries. */ -/*package-private*/ class VectorMathLibrary { +/*package-private*/ final class VectorMathLibrary { private static final SymbolLookup LOOKUP = SymbolLookup.loaderLookup(); interface Library { @@ -141,7 +142,7 @@ import static jdk.internal.vm.vector.Utils.debug; String elemType = (vspecies.elementType() == float.class ? "f" : ""); boolean isFloatVector64 = (vspecies.elementType() == float.class) && (vspecies.length() == 2); // FloatVector64 or FloatVectorMax int vlen = (isFloatVector64 ? 4 : vspecies.length()); // reuse 128-bit variant for 64-bit float vectors - return String.format("__jsvml_%s%s%d_ha_%s", op.operatorName(), elemType, vlen, suffix); + return String.format(Locale.ROOT, "__jsvml_%s%s%d_ha_%s", op.operatorName(), elemType, vlen, suffix); } @Override @@ -214,7 +215,7 @@ import static jdk.internal.vm.vector.Utils.debug; boolean isFloatVector64 = (vspecies.elementType() == float.class) && (vspecies.length() == 2); // FloatVector64 or FloatVectorMax int vlen = (isFloatVector64 ? 4 : vspecies.length()); // reuse 128-bit variant for 64-bit float vectors boolean isShapeAgnostic = isRISCV64() || (isAARCH64() && vspecies.vectorBitSize() > 128); - return String.format("%s%s%s_%s%s", op.operatorName(), + return String.format(Locale.ROOT, "%s%s%s_%s%s", op.operatorName(), (vspecies.elementType() == float.class ? 
"f" : "d"), (isShapeAgnostic ? "x" : Integer.toString(vlen)), precisionLevel(op), diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorOperators.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorOperators.java index 84009c55ac9..cc5a7ccbdef 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorOperators.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorOperators.java @@ -24,13 +24,12 @@ */ package jdk.incubator.vector; -import java.util.function.IntFunction; -import java.util.HashMap; import java.util.ArrayList; +import java.util.HashMap; +import java.util.function.IntFunction; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.annotation.Stable; - import jdk.internal.vm.vector.VectorSupport; import static jdk.internal.vm.vector.Utils.isNonCapturingLambda; @@ -115,7 +114,7 @@ import static jdk.internal.vm.vector.Utils.isNonCapturingLambda; * operations on individual lane values. * */ -public abstract class VectorOperators { +public final class VectorOperators { private VectorOperators() { } /** @@ -131,12 +130,9 @@ public abstract class VectorOperators { * @see VectorOperators.Test Test * @see VectorOperators.Conversion Conversion * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. + * @sealedGraph */ - public interface Operator { + public sealed interface Operator { /** * Returns the symbolic name of this operator, * as a constant in {@link VectorOperators}. @@ -235,13 +231,8 @@ public abstract class VectorOperators { * usable in expressions like {@code w = v0.}{@link * Vector#lanewise(VectorOperators.Unary) * lanewise}{@code (NEG)}. - * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. 
*/ - public interface Unary extends Operator { + public sealed interface Unary extends Operator { } /** @@ -252,12 +243,9 @@ public abstract class VectorOperators { * Vector#lanewise(VectorOperators.Binary,Vector) * lanewise}{@code (ADD, v1)}. * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. + * @sealedGraph */ - public interface Binary extends Operator { + public sealed interface Binary extends Operator { } /** @@ -267,13 +255,8 @@ public abstract class VectorOperators { * usable in expressions like {@code w = v0.}{@link * Vector#lanewise(VectorOperators.Ternary,Vector,Vector) * lanewise}{@code (FMA, v1, v2)}. - * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. */ - public interface Ternary extends Operator { + public sealed interface Ternary extends Operator { } /** @@ -283,13 +266,8 @@ public abstract class VectorOperators { * usable in expressions like {@code e = v0.}{@link * IntVector#reduceLanes(VectorOperators.Associative) * reduceLanes}{@code (ADD)}. - * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. */ - public interface Associative extends Binary { + public sealed interface Associative extends Binary { } /** @@ -299,13 +277,8 @@ public abstract class VectorOperators { * usable in expressions like {@code m = v0.}{@link * FloatVector#test(VectorOperators.Test) * test}{@code (IS_FINITE)}. - * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. 
*/ - public interface Test extends Operator { + public sealed interface Test extends Operator { } /** @@ -315,13 +288,8 @@ public abstract class VectorOperators { * usable in expressions like {@code m = v0.}{@link * Vector#compare(VectorOperators.Comparison,Vector) * compare}{@code (LT, v1)}. - * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. */ - public interface Comparison extends Operator { + public sealed interface Comparison extends Operator { } /** @@ -336,13 +304,8 @@ public abstract class VectorOperators { * domain type (the input lane type) * @param the boxed element type for the conversion * range type (the output lane type) - * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. */ - public interface Conversion extends Operator { + public sealed interface Conversion extends Operator { /** * The domain of this conversion, a primitive type. * @return the domain of this conversion @@ -597,15 +560,15 @@ public abstract class VectorOperators { /** Produce {@code a<<(n&(ESIZE*8-1))}. Integral only. */ - public static final /*bitwise*/ Binary LSHL = binary("LSHL", "<<", VectorSupport.VECTOR_OP_LSHIFT, VO_SHIFT); + public static final /*bitwise*/ Binary LSHL = binary("LSHL", "<<", VectorSupport.VECTOR_OP_LSHIFT, VO_SHIFT+VO_NOFP); /** Produce {@code a>>(n&(ESIZE*8-1))}. Integral only. */ - public static final /*bitwise*/ Binary ASHR = binary("ASHR", ">>", VectorSupport.VECTOR_OP_RSHIFT, VO_SHIFT); + public static final /*bitwise*/ Binary ASHR = binary("ASHR", ">>", VectorSupport.VECTOR_OP_RSHIFT, VO_SHIFT+VO_NOFP); /** Produce {@code (a&EMASK)>>>(n&(ESIZE*8-1))}. Integral only. 
*/ - public static final /*bitwise*/ Binary LSHR = binary("LSHR", ">>>", VectorSupport.VECTOR_OP_URSHIFT, VO_SHIFT); + public static final /*bitwise*/ Binary LSHR = binary("LSHR", ">>>", VectorSupport.VECTOR_OP_URSHIFT, VO_SHIFT+VO_NOFP); /** Produce {@code rotateLeft(a,n)}. Integral only. */ - public static final /*bitwise*/ Binary ROL = binary("ROL", "rotateLeft", VectorSupport.VECTOR_OP_LROTATE, VO_SHIFT); + public static final /*bitwise*/ Binary ROL = binary("ROL", "rotateLeft", VectorSupport.VECTOR_OP_LROTATE, VO_SHIFT+VO_NOFP); /** Produce {@code rotateRight(a,n)}. Integral only. */ - public static final /*bitwise*/ Binary ROR = binary("ROR", "rotateRight", VectorSupport.VECTOR_OP_RROTATE, VO_SHIFT); + public static final /*bitwise*/ Binary ROR = binary("ROR", "rotateRight", VectorSupport.VECTOR_OP_RROTATE, VO_SHIFT+VO_NOFP); /** Produce {@code compress(a,n)}. Integral, {@code int} and {@code long}, only. * @since 19 */ @@ -831,7 +794,7 @@ public abstract class VectorOperators { kind, dom, ran); } - private abstract static class OperatorImpl implements Operator { + private abstract static sealed class OperatorImpl implements Operator { private final String symName; private final String opName; private final int opInfo; @@ -956,35 +919,35 @@ public abstract class VectorOperators { } } - private static class UnaryImpl extends OperatorImpl implements Unary { + private static final class UnaryImpl extends OperatorImpl implements Unary { private UnaryImpl(String symName, String opName, int opInfo) { super(symName, opName, opInfo); assert((opInfo & VO_ARITY_MASK) == VO_UNARY); } } - private static class BinaryImpl extends OperatorImpl implements Binary { + private static sealed class BinaryImpl extends OperatorImpl implements Binary permits AssociativeImpl { private BinaryImpl(String symName, String opName, int opInfo) { super(symName, opName, opInfo); assert((opInfo & VO_ARITY_MASK) == VO_BINARY); } } - private static class TernaryImpl extends OperatorImpl 
implements Ternary { + private static final class TernaryImpl extends OperatorImpl implements Ternary { private TernaryImpl(String symName, String opName, int opInfo) { super(symName, opName, opInfo); assert((opInfo & VO_ARITY_MASK) == VO_TERNARY); } } - private static class AssociativeImpl extends BinaryImpl implements Associative { + private static final class AssociativeImpl extends BinaryImpl implements Associative { private AssociativeImpl(String symName, String opName, int opInfo) { super(symName, opName, opInfo); } } /*package-private*/ - static + static final class ConversionImpl extends OperatorImpl implements Conversion { private ConversionImpl(String symName, String opName, int opInfo, @@ -1260,7 +1223,7 @@ public abstract class VectorOperators { } } - private static class TestImpl extends OperatorImpl implements Test { + private static final class TestImpl extends OperatorImpl implements Test { private TestImpl(String symName, String opName, int opInfo) { super(symName, opName, opInfo); assert((opInfo & VO_ARITY_MASK) == VO_UNARY); @@ -1272,7 +1235,7 @@ public abstract class VectorOperators { } } - private static class ComparisonImpl extends OperatorImpl implements Comparison { + private static final class ComparisonImpl extends OperatorImpl implements Comparison { private ComparisonImpl(String symName, String opName, int opInfo) { super(symName, opName, opInfo); assert((opInfo & VO_ARITY_MASK) == VO_BINARY); diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorShuffle.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorShuffle.java index 9cde9d2315c..5da38a25e16 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorShuffle.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorShuffle.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -136,7 +136,7 @@ import java.util.function.IntUnaryOperator; * the element type of a vector */ @SuppressWarnings("exports") -public abstract class VectorShuffle extends jdk.internal.vm.vector.VectorSupport.VectorShuffle { +public abstract sealed class VectorShuffle extends jdk.internal.vm.vector.VectorSupport.VectorShuffle permits AbstractShuffle { VectorShuffle(Object indices) { super(indices); } diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorSpecies.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorSpecies.java index e80bbf231ea..4c3ef3f471d 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorSpecies.java +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/VectorSpecies.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,11 +36,6 @@ import java.util.function.IntUnaryOperator; * of element type ({@code ETYPE}) * and {@link VectorShape shape}. * - * @apiNote - * User code should not implement this interface. A future release of - * this type may restrict implementations to be members of the same - * package. 
- * * @implNote * The string representation of an instance of this interface will * be of the form "Species[ETYPE, VLENGTH, SHAPE]", where {@code @@ -57,7 +52,7 @@ import java.util.function.IntUnaryOperator; * @param the boxed version of {@code ETYPE}, * the element type of a vector */ -public interface VectorSpecies { +public sealed interface VectorSpecies permits AbstractSpecies { /** * Returns the primitive element type of vectors of this * species. diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template index d6763c2c03a..b3c5bfac302 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template @@ -49,7 +49,8 @@ import static jdk.incubator.vector.VectorOperators.*; * {@code $type$} values. */ @SuppressWarnings("cast") // warning: redundant cast -public abstract class $abstractvectortype$ extends AbstractVector<$Boxtype$> { +public abstract sealed class $abstractvectortype$ extends AbstractVector<$Boxtype$> + permits $Type$Vector64, $Type$Vector128, $Type$Vector256, $Type$Vector512, $Type$VectorMax { $abstractvectortype$($type$[] vec) { super(vec); @@ -772,7 +773,7 @@ public abstract class $abstractvectortype$ extends AbstractVector<$Boxtype$> { @ForceInline final $abstractvectortype$ unaryMathOp(VectorOperators.Unary op) { - return VectorMathLibrary.unaryMathOp(op, opCode(op), species(), $abstractvectortype$::unaryOperations, + return VectorMathLibrary.unaryMathOp(op, opCode(op), vspecies(), $abstractvectortype$::unaryOperations, this); } #end[FP] @@ -983,7 +984,7 @@ public abstract class $abstractvectortype$ extends AbstractVector<$Boxtype$> { @ForceInline final $abstractvectortype$ binaryMathOp(VectorOperators.Binary op, $abstractvectortype$ that) { - return VectorMathLibrary.binaryMathOp(op, opCode(op), species(), 
$abstractvectortype$::binaryOperations, + return VectorMathLibrary.binaryMathOp(op, opCode(op), vspecies(), $abstractvectortype$::binaryOperations, this, that); } #end[FP] diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-VectorBits.java.template b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-VectorBits.java.template index bbf02f9c6cd..d66d22cab19 100644 --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-VectorBits.java.template +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-VectorBits.java.template @@ -25,22 +25,25 @@ package jdk.incubator.vector; import java.lang.foreign.MemorySegment; +#if[longOrDouble] import java.lang.foreign.ValueLayout; +#end[longOrDouble] import java.nio.ByteOrder; import java.util.Arrays; import java.util.Objects; import java.util.function.IntUnaryOperator; +import jdk.internal.ValueBased; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.vector.VectorSupport; -import static jdk.internal.vm.vector.VectorSupport.*; - import static jdk.incubator.vector.VectorOperators.*; +import static jdk.internal.vm.vector.VectorSupport.*; #warn This file is preprocessed before being compiled @SuppressWarnings("cast") // warning: redundant cast +@ValueBased final class $vectortype$ extends $abstractvectortype$ { static final $Type$Species VSPECIES = ($Type$Species) $Type$Vector.SPECIES_$BITS$; @@ -387,7 +390,7 @@ final class $vectortype$ extends $abstractvectortype$ { @Override @ForceInline public final $shuffletype$ toShuffle() { - return ($shuffletype$) toShuffle(vspecies(), false); + return ($shuffletype$) toShuffle(VSPECIES, false); } // Specialized unary testing @@ -855,7 +858,7 @@ final class $vectortype$ extends $abstractvectortype$ { #end[FP] // Mask - + @ValueBased static final class $masktype$ extends AbstractMask<$Boxtype$> { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -903,7 +906,7 @@ final class $vectortype$ extends 
$abstractvectortype$ { @Override $masktype$ uOp(MUnOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); for (int i = 0; i < res.length; i++) { res[i] = f.apply(i, bits[i]); @@ -913,7 +916,7 @@ final class $vectortype$ extends $abstractvectortype$ { @Override $masktype$ bOp(VectorMask<$Boxtype$> m, MBinOp f) { - boolean[] res = new boolean[vspecies().laneCount()]; + boolean[] res = new boolean[VSPECIES.laneCount()]; boolean[] bits = getBits(); boolean[] mbits = (($masktype$)m).getBits(); for (int i = 0; i < res.length; i++) { @@ -1063,16 +1066,16 @@ final class $vectortype$ extends $abstractvectortype$ { @ForceInline public boolean anyTrue() { return VectorSupport.test(BT_ne, $masktype$.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> anyTrueHelper((($masktype$)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> anyTrueHelper((($masktype$)m).getBits())); } @Override @ForceInline public boolean allTrue() { return VectorSupport.test(BT_overflow, $masktype$.class, LANEBITS_TYPE_ORDINAL, VLENGTH, - this, vspecies().maskAll(true), - (m, __) -> allTrueHelper((($masktype$)m).getBits())); + this, VSPECIES.maskAll(true), + (m, _) -> allTrueHelper((($masktype$)m).getBits())); } @ForceInline @@ -1080,7 +1083,7 @@ final class $vectortype$ extends $abstractvectortype$ { static $masktype$ maskAll(boolean bit) { return VectorSupport.fromBitsCoerced($masktype$.class, LANEBITS_TYPE_ORDINAL, VLENGTH, (bit ? -1 : 0), MODE_BROADCAST, null, - (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); + (v, _) -> (v != 0 ? 
TRUE_MASK : FALSE_MASK)); } private static final $masktype$ TRUE_MASK = new $masktype$(true); private static final $masktype$ FALSE_MASK = new $masktype$(false); @@ -1103,7 +1106,7 @@ final class $vectortype$ extends $abstractvectortype$ { } // Shuffle - + @ValueBased static final class $shuffletype$ extends AbstractShuffle<$Boxtype$> { static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM @@ -1145,7 +1148,7 @@ final class $vectortype$ extends $abstractvectortype$ { @Override @ForceInline public $vectortype$ toVector() { - return ($vectortype$) toBitsVector().castShape(vspecies(), 0); + return ($vectortype$) toBitsVector().castShape(VSPECIES, 0); } #else[FP] @Override @@ -1163,7 +1166,7 @@ final class $vectortype$ extends $abstractvectortype$ { @Override $bitsvectortype$ toBitsVector0() { - return (($bitsvectortype$) vspecies().asIntegral().dummyVector()).vectorFactory(indices()); + return (($bitsvectortype$) VSPECIES.asIntegral().dummyVector()).vectorFactory(indices()); } @Override @@ -1299,7 +1302,7 @@ final class $vectortype$ extends $abstractvectortype$ { @ForceInline public final $masktype$ laneIsValid() { return ($masktype$) toBitsVector().compare(VectorOperators.GE, 0) - .cast(vspecies()); + .cast(VSPECIES); } @ForceInline @@ -1308,10 +1311,10 @@ final class $vectortype$ extends $abstractvectortype$ { $shuffletype$ concreteShuffle = ($shuffletype$) shuffle; #if[FP] return ($shuffletype$) toBitsVector().rearrange(concreteShuffle.cast($Bitstype$Vector.SPECIES_$BITS$)) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); #else[FP] return ($shuffletype$) toBitsVector().rearrange(concreteShuffle) - .toShuffle(vspecies(), false); + .toShuffle(VSPECIES, false); #end[FP] } @@ -1325,7 +1328,7 @@ final class $vectortype$ extends $abstractvectortype$ { v = ($bitsvectortype$) v.blend(v.lanewise(VectorOperators.ADD, length()), v.compare(VectorOperators.LT, 0)); } - return ($shuffletype$) v.toShuffle(vspecies(), false); + return ($shuffletype$) 
v.toShuffle(VSPECIES, false); } private static $bitstype$[] prepare(int[] indices, int offset) { diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/aarch64/AArch64.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/aarch64/AArch64.java index 391ac224609..7790a9abd7c 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/aarch64/AArch64.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/aarch64/AArch64.java @@ -184,6 +184,8 @@ public class AArch64 extends Architecture { SVEBITPERM, SVE2, A53MAC, + ECV, + WFXT, FPHP, ASIMDHP, } diff --git a/src/jdk.jartool/share/man/jar.md b/src/jdk.jartool/share/man/jar.md index d944afcfb7f..658fa0cb4fa 100644 --- a/src/jdk.jartool/share/man/jar.md +++ b/src/jdk.jartool/share/man/jar.md @@ -1,5 +1,5 @@ --- -# Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -84,29 +84,29 @@ appropriate operation arguments described in this section. You can mix an operation argument with other one-letter options. Generally the operation argument is the first argument specified on the command line. -`-c` or `--create` +[`-c`]{#option--create} or `--create` : Creates the archive. -`-i` *FILE* or `--generate-index=`*FILE* +[`-i`]{#option--generate-index} *FILE* or `--generate-index=`*FILE* : Generates index information for the specified JAR file. This option is deprecated and may be removed in a future release. -`-t` or `--list` +[`-t`]{#option--list} or `--list` : Lists the table of contents for the archive. -`-u` or `--update` +[`-u`]{#option--update} or `--update` : Updates an existing JAR file. -`-x` or `--extract` +[`-x`]{#option--extract} or `--extract` : Extracts the named (or all) files from the archive. 
If a file with the same name appears more than once in the archive, each copy will be extracted, with later copies overwriting (replacing) earlier copies unless -k is specified. -`-d` or `--describe-module` +[`-d`]{#option--describe-module} or `--describe-module` : Prints the module descriptor or automatic module name. -`--validate` +[`--validate`]{#option--validate} : Validate the contents of the JAR file. See `Integrity of a JAR File` section below for more details. @@ -115,7 +115,7 @@ argument is the first argument specified on the command line. You can use the following options to customize the actions of any operation mode included in the `jar` command. -`-C` *DIR* +[`-C`]{#option-C} *DIR* : When used with the create operation mode, changes the specified directory and includes the *files* specified at the end of the command line. @@ -126,10 +126,10 @@ mode included in the `jar` command. where the JAR file will be extracted. Unlike with the create operation mode, this option can be specified only once with the extract operation mode. -`-f` *FILE* or `--file=`*FILE* +[`-f`]{#option--file} *FILE* or `--file=`*FILE* : Specifies the archive file name. -`--release` *VERSION* +[`--release`]{#option--release} *VERSION* : Creates a multirelease JAR file. Places all files specified after the option into a versioned directory of the JAR file named `META-INF/versions/`*VERSION*`/`, where *VERSION* must be must be a @@ -149,26 +149,26 @@ mode included in the `jar` command. You can use the following options to customize the actions of the create and the update main operation modes: -`-e` *CLASSNAME* or `--main-class=`*CLASSNAME* +[`-e`]{#option--main-class} *CLASSNAME* or `--main-class=`*CLASSNAME* : Specifies the application entry point for standalone applications bundled into a modular or executable modular JAR file. 
-`-m` *FILE* or `--manifest=`*FILE* +[`-m`]{#option--manifest} *FILE* or `--manifest=`*FILE* : Includes the manifest information from the given manifest file. -`-M` or `--no-manifest` +[`-M`]{#option--no-manifest} or `--no-manifest` : Doesn't create a manifest file for the entries. -`--module-version=`*VERSION* +[`--module-version=`]{#option--module-version}*VERSION* : Specifies the module version, when creating or updating a modular JAR file, or updating a non-modular JAR file. -`--hash-modules=`*PATTERN* +[`--hash-modules=`]{#option--hash-modules}*PATTERN* : Computes and records the hashes of modules matched by the given pattern and that depend upon directly or indirectly on a modular JAR file being created or a non-modular JAR file being updated. -`-p` or `--module-path` +[`-p`]{#option--module-path} or `--module-path` : Specifies the location of module dependence for generating the hash. `@`*file* @@ -181,20 +181,20 @@ You can use the following options to customize the actions of the create (`-c` or `--create`) the update (`-u` or `--update` ) and the generate-index (`-i` or `--generate-index=`*FILE*) main operation modes: -`-0` or `--no-compress` +[`-0`]{#option--no-compress} or `--no-compress` : Stores without using ZIP compression. -`--date=`*TIMESTAMP* +[`--date=`]{#option--date}*TIMESTAMP* : The timestamp in ISO-8601 extended offset date-time with optional time-zone format, to use for the timestamp of the entries, e.g. "2022-02-12T12:30:00-05:00". ## Operation Modifiers Valid Only in Extract Mode -`--dir` *DIR* +[`--dir`]{#option--dir} *DIR* : Directory into which the JAR file will be extracted. -`-k` or `--keep-old-files` +[`-k`]{#option--keep-old-files} or `--keep-old-files` : Do not overwrite existing files. If a Jar file entry with the same name exists in the target directory, the existing file will not be overwritten. 
diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/StandardDoclet.java b/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/StandardDoclet.java index 8f5fec1e4b9..90511db8053 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/StandardDoclet.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/StandardDoclet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ import jdk.javadoc.internal.doclets.formats.html.HtmlDoclet; * in documentation comments. * * Taglets invoked by the standard doclet must return strings from - * {@link Taglet#toString(List,Element) Taglet.toString} as follows: + * {@link Taglet#toString(List,Element,java.net.URI) Taglet.toString} as follows: * *

    *
    Inline Tags diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/Taglet.java b/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/Taglet.java index 1ad67a89fef..ec079047dfa 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/Taglet.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/doclet/Taglet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ package jdk.javadoc.doclet; +import java.net.URI; import java.util.List; import java.util.Set; @@ -34,7 +35,7 @@ import com.sun.source.doctree.DocTree; /** * The interface for a custom taglet supported by doclets such as - * the {@link jdk.javadoc.doclet.StandardDoclet standard doclet}. + * the {@linkplain StandardDoclet standard doclet}. * Custom taglets are used to handle custom tags in documentation * comments; custom tags can be instantiated individually as either * block tags, which appear at the end of a comment, @@ -55,10 +56,10 @@ import com.sun.source.doctree.DocTree; * {@link #isInlineTag() isInlineTag}, to determine the characteristics * of the tags supported by the taglet. *
  • As appropriate, the doclet calls the - * {@link #toString(List,Element) toString} method on the taglet object, - * giving it a list of tags and the element for which the tags are part - * of the element's documentation comment, from which the taglet can - * determine the string to be included in the documentation. + * {@link #toString(List,Element,URI) toString} method on the taglet object, + * giving it a list of tags, the element whose documentation comment contains + * the tags, and the root URI of the generated output, from which the taglet + * can determine the string to be included in the documentation. * The doclet will typically specify any requirements on the contents of * the string that is returned. * @@ -126,25 +127,70 @@ public interface Taglet { default void init(DocletEnvironment env, Doclet doclet) { } /** - * Returns the string representation of a series of instances of - * this tag to be included in the generated output. + * Returns the string representation of the specified instances of this tag + * to be included in the generated output. * - *

    If this taglet supports {@link #isInlineTag inline} tags, it will + *

    If this taglet supports {@link #isInlineTag inline} tags, this method will * be called once per instance of the inline tag, each time with a singleton list. * If this taglet supports {@link #isBlockTag block} tags, it will be called once * for each comment containing instances of block tags, with a list of all the instances * of the block tag in that comment. * + * @apiNote Taglets that do not need the root URI of the generated output may + * implement this method only. Taglets that require the root URI to link to other + * doclet-generated resources should override {@link #toString(List, Element, URI)}, + * and optionally throw an exception in the implementation of this method. + * * @param tags the list of instances of this tag * @param element the element to which the enclosing comment belongs * @return the string representation of the tags to be included in * the generated output - * + * @throws UnsupportedOperationException if {@link #toString(List, Element, URI)} + * should be invoked instead * @see User-Defined Taglets * for the Standard Doclet + * @see #toString(List, Element, URI) */ String toString(List tags, Element element); + /** + * Returns the string representation of the specified instances of this tag + * to be included in the generated output. + * + *

    If this taglet supports {@link #isInlineTag inline} tags, this method will + * be called once per instance of the inline tag, each time with a singleton list. + * If this taglet supports {@link #isBlockTag block} tags, it will be called once + * for each comment containing instances of block tags, with a list of all the instances + * of the block tag in that comment. + * + *

    The {@code docRoot} argument identifies the root of the generated output + * as seen by the current resource, and may be used to {@linkplain URI#resolve(String) + * resolve} links to other resources generated by the doclet. + * + * @apiNote The exact form of {@code docRoot} is doclet-specific. For the + * {@linkplain StandardDoclet standard doclet}, it is a relative URI from + * the current resource to the root directory of the generated output. + * Taglets intended for use with other doclets should check the validity + * of the {@code docRoot} argument as appropriate. + * + * @implSpec The default implementation invokes {@link #toString(List, Element) + * toString(tags, element)}. + * + * @param tags the list of instances of this tag + * @param element the element to which the enclosing comment belongs + * @param docRoot the root URI of the generated output + * @return the string representation of the tags to be included in + * the generated output + * @throws IllegalArgumentException if {@code docRoot} is not a valid URI + * @see User-Defined Taglets + * for the Standard Doclet + * @see #toString(List, Element) + * @since 27 + */ + default String toString(List tags, Element element, URI docRoot) { + return toString(tags, element); + } + /** * The kind of location in which a tag may be used. */ diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDoclet.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDoclet.java index 66fcd90e2e8..17d56ff11ba 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDoclet.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDoclet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -335,16 +335,9 @@ public class HtmlDoclet extends AbstractDoclet { if (options.createIndex()) { copyResource(DocPaths.SEARCH_JS_TEMPLATE, DocPaths.SCRIPT_FILES.resolve(DocPaths.SEARCH_JS), true); - copyResource(DocPaths.SEARCH_PAGE_JS, DocPaths.SCRIPT_FILES.resolve(DocPaths.SEARCH_PAGE_JS), true); copyResource(DocPaths.GLASS_SVG, DocPaths.RESOURCE_FILES.resolve(DocPaths.GLASS_SVG), false); copyResource(DocPaths.X_SVG, DocPaths.RESOURCE_FILES.resolve(DocPaths.X_SVG), false); - // No newline replacement for JQuery files - copyResource(DocPaths.JQUERY_DIR.resolve(DocPaths.JQUERY_JS), - DocPaths.SCRIPT_FILES.resolve(DocPaths.JQUERY_JS), false); - copyResource(DocPaths.JQUERY_DIR.resolve(DocPaths.JQUERY_UI_JS), - DocPaths.SCRIPT_FILES.resolve(DocPaths.JQUERY_UI_JS), false); - copyResource(DocPaths.JQUERY_DIR.resolve(DocPaths.JQUERY_UI_CSS), - DocPaths.RESOURCE_FILES.resolve(DocPaths.JQUERY_UI_CSS), false); } + } copyLegalFiles(options.createIndex(), options.syntaxHighlight()); // Print a notice if the documentation contains diagnostic markers @@ -369,7 +362,7 @@ public class HtmlDoclet extends AbstractDoclet { case "", "default" -> { // use a known resource as a stand-in, because we cannot get the URL for a resources directory var url = HtmlDoclet.class.getResource( - DocPaths.RESOURCES.resolve(DocPaths.LEGAL).resolve(DocPaths.JQUERY_MD).getPath()); + DocPaths.RESOURCES.resolve(DocPaths.LEGAL).resolve(DocPaths.DEJAVU_MD).getPath()); if (url != null) { try { legalNoticesDir = Path.of(url.toURI()).getParent(); diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDocletWriter.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDocletWriter.java index 86ac3a892fd..594c36c4af2 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDocletWriter.java +++ 
b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlDocletWriter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -243,12 +243,8 @@ public abstract class HtmlDocletWriter { if (generating) { writeGenerating(); } - CURRENT_PATH.set(path.getPath()); } - /** Temporary workaround to share current path with taglets, see 8373909 */ - public static final ThreadLocal CURRENT_PATH = new ThreadLocal<>(); - /** * The top-level method to generate and write the page represented by this writer. * diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlIds.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlIds.java index a3fba7eca14..50e6207b833 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlIds.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlIds.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,6 +112,11 @@ public class HtmlIds { static final HtmlId RELATED_PACKAGE_SUMMARY = HtmlId.of("related-package-summary"); static final HtmlId RESET_SEARCH = HtmlId.of("reset-search"); static final HtmlId SEARCH_INPUT = HtmlId.of("search-input"); + static final HtmlId SEARCH_INPUT_CONTAINER = HtmlId.of("search-input-container"); + static final HtmlId SEARCH_MODULES = HtmlId.of("search-modules"); + static final HtmlId SEARCH_PAGE_LINK = HtmlId.of("search-page-link"); + static final HtmlId SEARCH_RESULT_CONTAINER = HtmlId.of("search-result-container"); + static final HtmlId SEARCH_RESULT_SECTION = HtmlId.of("search-result-section"); static final HtmlId SERVICES = HtmlId.of("services-summary"); static final HtmlId SKIP_NAVBAR_TOP = HtmlId.of("skip-navbar-top"); static final HtmlId THEME_BUTTON = HtmlId.of("theme-button"); diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/Navigation.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/Navigation.java index 30318bbaeea..cde5b287e25 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/Navigation.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/Navigation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,8 @@ import jdk.javadoc.internal.html.Content; import jdk.javadoc.internal.html.ContentBuilder; import jdk.javadoc.internal.html.Entity; import jdk.javadoc.internal.html.HtmlAttr; +import jdk.javadoc.internal.html.HtmlId; +import jdk.javadoc.internal.html.HtmlTag; import jdk.javadoc.internal.html.HtmlTree; import jdk.javadoc.internal.html.Text; @@ -535,6 +537,42 @@ public class Navigation { .add(inputText) .add(inputReset); target.add(searchDiv); + target.add(HtmlTree.DIV(HtmlIds.SEARCH_RESULT_SECTION) + .add(HtmlTree.DIV(HtmlStyles.searchForm) + .add(HtmlTree.DIV(HtmlTree.LABEL(HtmlIds.SEARCH_INPUT.name(), + contents.getContent("doclet.search.for")))) + .add(HtmlTree.DIV(HtmlIds.SEARCH_INPUT_CONTAINER).addUnchecked(Text.EMPTY)) + .add(createModuleSelector())) + .add(HtmlTree.DIV(HtmlIds.SEARCH_RESULT_CONTAINER).addUnchecked(Text.EMPTY)) + .add(HtmlTree.DIV(HtmlStyles.searchLinks) + .add(HtmlTree.DIV(links.createLink(pathToRoot.resolve(DocPaths.SEARCH_PAGE), + contents.getContent("doclet.search.linkSearchPageLabel")) + .setId(HtmlIds.SEARCH_PAGE_LINK))) + .add(options.noHelp() || !options.helpFile().isEmpty() + ? 
HtmlTree.DIV(Text.EMPTY).addUnchecked(Text.EMPTY) + : HtmlTree.DIV(links.createLink(pathToRoot.resolve(DocPaths.HELP_DOC).fragment("search"), + contents.getContent("doclet.search.linkSearchHelpLabel")))))); + } + + private Content createModuleSelector() { + if (!configuration.showModules || configuration.modules.size() < 2) { + return Text.EMPTY; + } + var content = new ContentBuilder(HtmlTree.DIV(HtmlTree.LABEL(HtmlIds.SEARCH_MODULES.name(), + contents.getContent("doclet.search.in_modules")))); + var select = HtmlTree.of(HtmlTag.SELECT) + .setId(HtmlIds.SEARCH_MODULES) + .put(HtmlAttr.ARIA_LABEL, configuration.getDocResources().getText("doclet.selectModule")) + .add(HtmlTree.of(HtmlTag.OPTION) + .put(HtmlAttr.VALUE, "") + .add(contents.getContent("doclet.search.all_modules"))); + + for (ModuleElement module : configuration.modules) { + select.add(HtmlTree.of(HtmlTag.OPTION) + .put(HtmlAttr.VALUE, module.getQualifiedName().toString()) + .add(Text.of(module.getQualifiedName().toString()))); + } + return content.add(HtmlTree.DIV(select)); } /** diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/SearchWriter.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/SearchWriter.java index 433a641530d..5fa0daacf98 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/SearchWriter.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/SearchWriter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -92,17 +92,15 @@ public class SearchWriter extends HtmlDocletWriter { .add(resourceSection) .add(HtmlTree.P(contents.getContent("doclet.search.loading")) .setId(HtmlId.of("page-search-notify"))) - .add(HtmlTree.DIV(HtmlTree.DIV(HtmlId.of("result-container")) + .add(HtmlTree.DIV(HtmlTree.DIV(HtmlIds.SEARCH_RESULT_CONTAINER) .addUnchecked(Text.EMPTY)) - .setId(HtmlId.of("result-section")) - .put(HtmlAttr.STYLE, "display: none;") - .add(HtmlTree.SCRIPT(pathToRoot.resolve(DocPaths.SCRIPT_FILES) - .resolve(DocPaths.SEARCH_PAGE_JS).getPath()))); + .setId(HtmlIds.SEARCH_RESULT_SECTION) + .put(HtmlAttr.STYLE, "display: none;")); } private Content createModuleSelector() { - if (!configuration.showModules) { + if (!configuration.showModules || configuration.modules.size() < 2) { return Text.EMPTY; } @@ -118,7 +116,7 @@ public class SearchWriter extends HtmlDocletWriter { .put(HtmlAttr.VALUE, module.getQualifiedName().toString()) .add(Text.of(module.getQualifiedName().toString()))); } - return new ContentBuilder(contents.getContent("doclet.search.in", select)); + return new ContentBuilder(contents.getContent("doclet.search.in_modules"), select); } private Content createResourceSection() { diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/Head.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/Head.java index cda4bc9a5be..bff32cbd7bf 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/Head.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/Head.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -336,11 +336,6 @@ public class Head extends Content { } private void addStylesheets(HtmlTree head) { - if (index) { - // Add JQuery-UI stylesheet first so its rules can be overridden. - addStylesheet(head, DocPaths.RESOURCE_FILES.resolve(DocPaths.JQUERY_UI_CSS)); - } - if (mainStylesheet == null) { mainStylesheet = DocPaths.STYLESHEET; } @@ -381,8 +376,6 @@ public class Head extends Content { .append("loadScripts();\n") .append("initTheme();\n"); } - addScriptElement(head, DocPaths.JQUERY_JS); - addScriptElement(head, DocPaths.JQUERY_UI_JS); } for (HtmlConfiguration.JavaScriptFile javaScriptFile : additionalScripts) { addScriptElement(head, javaScriptFile); diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/HtmlStyles.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/HtmlStyles.java index 9b59cb0cb47..2b154db7de7 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/HtmlStyles.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/HtmlStyles.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -734,6 +734,16 @@ public enum HtmlStyles implements HtmlStyle { */ pageSearchInfo, + /** + * The class for a {@code div} element in the search widget containing the search form inputs. + */ + searchForm, + + /** + * The class for a {@code div} element in the search widget containing search-related links. + */ + searchLinks, + /** * The class for a link in the static "Index" pages to a custom searchable item, * such as defined with an {@code @index} tag. 
diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-3.7.1.js b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-3.7.1.js deleted file mode 100644 index 1a86433c223..00000000000 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-3.7.1.js +++ /dev/null @@ -1,10716 +0,0 @@ -/*! - * jQuery JavaScript Library v3.7.1 - * https://jquery.com/ - * - * Copyright OpenJS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2023-08-28T13:37Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket trac-14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. 
-"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 - // Plus for old WebKit, typeof returns "function" for HTML collections - // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) - return typeof obj === "function" && typeof obj.nodeType !== "number" && - typeof obj.item !== "function"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. 
- // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var version = "3.7.1", - - rhtmlSuffix = /HTML$/i, - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? 
this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new 
Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. - globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - - // Retrieve the text value of an array of DOM nodes - text: function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += jQuery.text( node ); - } - } - if ( nodeType === 1 || nodeType === 11 ) { - return elem.textContent; - } - if ( nodeType === 9 ) { - return elem.documentElement.textContent; - } - if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; - }, - 
- // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - isXMLDoc: function( elem ) { - var namespace = elem && elem.namespaceURI, - docElem = elem && ( elem.ownerDocument || elem ).documentElement; - - // Assume HTML when documentElement doesn't yet exist, such as inside - // document fragments. - return !rhtmlSuffix.test( namespace || docElem && docElem.nodeName || "HTML" ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != 
null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), - function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - } ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -} -var pop = arr.pop; - - -var sort = arr.sort; - - -var splice = arr.splice; - - -var whitespace = "[\\x20\\t\\r\\n\\f]"; - - -var rtrimCSS = new RegExp( - "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", - "g" -); - - - - -// Note: an element does not contain itself -jQuery.contains = function( a, b ) { - var bup = b && b.parentNode; - - return a === bup || !!( bup && bup.nodeType === 1 && ( - - // Support: IE 9 - 11+ - // IE doesn't have `contains` on SVG. - a.contains ? 
- a.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); -}; - - - - -// CSS string/identifier serialization -// https://drafts.csswg.org/cssom/#common-serializing-idioms -var rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g; - -function fcssescape( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; -} - -jQuery.escapeSelector = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - - - - -var preferredDoc = document, - pushNative = push; - -( function() { - -var i, - Expr, - outermostContext, - sortInput, - hasDuplicate, - push = pushNative, - - // Local document vars - document, - documentElement, - documentIsHTML, - rbuggyQSA, - matches, - - // Instance-specific data - expando = jQuery.expando, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|" + - "loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: https://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values 
must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rleadingCombinator = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + - whitespace + "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - ID: new RegExp( "^#(" + identifier + ")" ), - CLASS: new RegExp( "^\\.(" + identifier + ")" ), - TAG: new RegExp( "^(" + identifier + "|[*])" ), - ATTR: new RegExp( "^" + attributes ), - PSEUDO: new RegExp( "^" + pseudos ), - CHILD: new RegExp( - "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - bool: new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - needsContext: new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr 
= /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // https://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - if ( nonHex ) { - - // Strip the backslash prefix from a non-hex escape sequence - return nonHex; - } - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - return high < 0 ? - String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // Used for iframes; see `setDocument`. - // Support: IE 9 - 11+, Edge 12 - 18+ - // Removing the function wrapper causes a "Permission Denied" - // error in IE/Edge. - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && nodeName( elem, "fieldset" ); - }, - { dir: "parentNode", next: "legend" } - ); - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android <=4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { - apply: function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - }, - call: function( target ) { - pushNative.apply( target, slice.call( arguments, 1 ) ); - } - }; -} - -function find( selector, context, results, seed ) { - 
var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE 9 only - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - push.call( results, elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE 9 only - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - find.contains( context, elem ) && - elem.id === m ) { - - push.call( results, elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && context.getElementsByClassName ) { - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) ) { - - newSelector = selector; - newContext = 
context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rleadingCombinator.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when - // strict-comparing two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( newContext != context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = jQuery.escapeSelector( nid ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrimCSS, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties - // (see https://github.com/jquery/sizzle/issues/157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by jQuery selector module - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - return nodeName( elem, "input" ) && elem.type === type; - 
}; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - return ( nodeName( elem, "input" ) || nodeName( elem, "button" ) ) && - elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11+ - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. 
- // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a jQuery selector context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [node] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -function setDocument( node ) { - var subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - documentElement = document.documentElement; - documentIsHTML = !jQuery.isXMLDoc( document ); - - // Support: iOS 7 only, IE 9 - 11+ - // Older browsers didn't support unprefixed `matches`. - matches = documentElement.matches || - documentElement.webkitMatchesSelector || - documentElement.msMatchesSelector; - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors - // (see trac-13936). - // Limit the fix to IE & Edge Legacy; despite Edge 15+ implementing `matches`, - // all IE 9+ and Edge Legacy versions implement `msMatchesSelector` as well. - if ( documentElement.msMatchesSelector && - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 9 - 11+, Edge 12 - 18+ - subWindow.addEventListener( "unload", unloadHandler ); - } - - // Support: IE <10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - documentElement.appendChild( el ).id = jQuery.expando; - return !document.getElementsByName || - !document.getElementsByName( jQuery.expando ).length; - } ); - - // Support: IE 9 only - // Check to see if it's possible to do matchesSelector - // on a disconnected node. - support.disconnectedMatch = assert( function( el ) { - return matches.call( el, "*" ); - } ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // IE/Edge don't support the :scope pseudo-class. 
- support.scope = assert( function() { - return document.querySelectorAll( ":scope" ); - } ); - - // Support: Chrome 105 - 111 only, Safari 15.4 - 16.3 only - // Make sure the `:has()` argument is parsed unforgivingly. - // We include `*` in the test to detect buggy implementations that are - // _selectively_ forgiving (specifically when the list includes at least - // one valid selector). - // Note that we treat complete lack of support for `:has()` as if it were - // spec-compliant support, which is fine because use of `:has()` in such - // environments will fail in the qSA path and fall back to jQuery traversal - // anyway. - support.cssHas = assert( function() { - try { - document.querySelector( ":has(*,:jqfake)" ); - return false; - } catch ( e ) { - return true; - } - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter.ID = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find.ID = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? 
[ elem ] : []; - } - }; - } else { - Expr.filter.ID = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find.ID = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find.TAG = function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else { - return context.querySelectorAll( tag ); - } - }; - - // Class - Expr.find.CLASS = function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - rbuggyQSA = []; - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - documentElement.appendChild( el ).innerHTML = - "" + - ""; - - // Support: iOS <=7 - 8 only - // Boolean attributes and "value" are not treated correctly in some XML documents - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + 
whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: iOS <=7 - 8 only - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: iOS 8 only - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push( ".#.+[+~]" ); - } - - // Support: Chrome <=105+, Firefox <=104+, Safari <=15.4+ - // In some of the document kinds, these selectors wouldn't work natively. - // This is probably OK but for backwards compatibility we want to maintain - // handling them through jQuery traversal in jQuery 3.x. - if ( !el.querySelectorAll( ":checked" ).length ) { - rbuggyQSA.push( ":checked" ); - } - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - input = document.createElement( "input" ); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE 9 - 11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - // Support: Chrome <=105+, Firefox <=104+, Safari <=15.4+ - // In some of the document kinds, these selectors wouldn't work natively. - // This is probably OK but for backwards compatibility we want to maintain - // handling them through jQuery traversal in jQuery 3.x. - documentElement.appendChild( el ).disabled = true; - if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
- input = document.createElement( "input" ); - input.setAttribute( "name", "" ); - el.appendChild( input ); - if ( !el.querySelectorAll( "[name='']" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + - whitespace + "*(?:''|\"\")" ); - } - } ); - - if ( !support.cssHas ) { - - // Support: Chrome 105 - 110+, Safari 15.4 - 16.3+ - // Our regular `try-catch` mechanism fails to detect natively-unsupported - // pseudo-classes inside `:has()` (such as `:has(:contains("Foo"))`) - // in browsers that parse the `:has()` argument as a forgiving selector list. - // https://drafts.csswg.org/selectors/#relational now requires the argument - // to be parsed unforgivingly, but browsers have not yet fully adjusted. - rbuggyQSA.push( ":has" ); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? 
- a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a === document || a.ownerDocument == preferredDoc && - find.contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b === document || b.ownerDocument == preferredDoc && - find.contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - }; - - return document; -} - -find.matches = function( expr, elements ) { - return find( expr, null, null, elements ); -}; - -find.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return find( expr, document, null, [ elem ] ).length > 0; -}; - -find.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return jQuery.contains( context, elem ); -}; - - -find.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (see trac-13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? 
- fn( elem, name, !documentIsHTML ) : - undefined; - - if ( val !== undefined ) { - return val; - } - - return elem.getAttribute( name ); -}; - -find.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -jQuery.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - // - // Support: Android <=4.0+ - // Testing for detecting duplicates is unpredictable so instead assume we can't - // depend on duplicate detection in all browsers without a stable sort. - hasDuplicate = !support.sortStable; - sortInput = !support.sortStable && slice.call( results, 0 ); - sort.call( results, sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - splice.call( results, duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -jQuery.fn.uniqueSort = function() { - return this.pushStack( jQuery.uniqueSort( slice.apply( this ) ) ); -}; - -Expr = jQuery.expr = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - ATTR: function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || match[ 5 ] || "" ) - .replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return 
match.slice( 0, 4 ); - }, - - CHILD: function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - find.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? - match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) - ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - find.error( match[ 0 ] ); - } - - return match; - }, - - PSEUDO: function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr.CHILD.test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - TAG: function( nodeNameSelector ) { - var expectedNodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? 
- function() { - return true; - } : - function( elem ) { - return nodeName( elem, expectedNodeName ); - }; - }, - - CLASS: function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + ")" + className + - "(" + whitespace + "|$)" ) ) && - classCache( className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - ATTR: function( name, operator, check ) { - return function( elem ) { - var result = find.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - if ( operator === "=" ) { - return result === check; - } - if ( operator === "!=" ) { - return result !== check; - } - if ( operator === "^=" ) { - return check && result.indexOf( check ) === 0; - } - if ( operator === "*=" ) { - return check && result.indexOf( check ) > -1; - } - if ( operator === "$=" ) { - return check && result.slice( -check.length ) === check; - } - if ( operator === "~=" ) { - return ( " " + result.replace( rwhitespace, " " ) + " " ) - .indexOf( check ) > -1; - } - if ( operator === "|=" ) { - return result === check || result.slice( 0, check.length + 1 ) === check + "-"; - } - - return false; - }; - }, - - CHILD: function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? 
"nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? - nodeName( node, name ) : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - outerCache = parent[ expando ] || ( parent[ expando ] = {} ); - cache = outerCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - outerCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - cache = outerCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? 
- nodeName( node, name ) : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - outerCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - PSEUDO: function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // https://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - find.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as jQuery does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? - markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf.call( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - not: markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrimCSS, "$1" ) ); - - return matcher[ expando ] ? 
- markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element - // (see https://github.com/jquery/sizzle/issues/299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - has: markFunction( function( selector ) { - return function( elem ) { - return find( selector, elem ).length > 0; - }; - } ), - - contains: markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || jQuery.text( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // https://www.w3.org/TR/selectors/#lang-pseudo - lang: markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - find.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - target: function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - root: function( elem ) { - return elem === documentElement; - }, - - focus: function( elem ) { - return elem === safeActiveElement() && - document.hasFocus() && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - enabled: createDisabledPseudo( false ), - disabled: createDisabledPseudo( true ), - - checked: function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // https://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - return ( nodeName( elem, "input" ) && !!elem.checked ) || - ( nodeName( elem, "option" ) && !!elem.selected ); - }, - - selected: function( elem ) { - - // Support: IE <=11+ - // Accessing the selectedIndex property - // forces the browser to treat the default option as - // selected when in an optgroup. - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - empty: function( elem ) { - - // https://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) 
- // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - parent: function( elem ) { - return !Expr.pseudos.empty( elem ); - }, - - // Element/input types - header: function( elem ) { - return rheader.test( elem.nodeName ); - }, - - input: function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - button: function( elem ) { - return nodeName( elem, "input" ) && elem.type === "button" || - nodeName( elem, "button" ); - }, - - text: function( elem ) { - var attr; - return nodeName( elem, "input" ) && elem.type === "text" && - - // Support: IE <10 only - // New HTML5 attribute values (e.g., "search") appear - // with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - first: createPositionalPseudo( function() { - return [ 0 ]; - } ), - - last: createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - eq: createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? 
argument + length : argument ]; - } ), - - even: createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - odd: createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - lt: createPositionalPseudo( function( matchIndexes, length, argument ) { - var i; - - if ( argument < 0 ) { - i = argument + length; - } else if ( argument > length ) { - i = length; - } else { - i = argument; - } - - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - gt: createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos.nth = Expr.pseudos.eq; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -function tokenize( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 
0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rleadingCombinator.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrimCSS, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - if ( parseOnly ) { - return soFar.length; - } - - return soFar ? - find.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -} - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - if ( skip && nodeName( elem, skip ) ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = outerCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - outerCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? 
- function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - find( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, matcherOut, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || - multipleContexts( selector || "*", - context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems; - - if ( matcher ) { - - // If we have a postFinder, or filtered seed, or non-seed postFilter - // or preexisting results, - matcherOut = postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results; - - // Find primary matches - matcher( matcherIn, matcherOut, context, xml ); - } else { - matcherOut = matcherIn; - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf.call( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 
1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf.call( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - var ret = ( !leadingRelative && ( xml || context != outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element - // (see https://github.com/jquery/sizzle/issues/299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrimCSS, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find.TAG( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: iOS <=7 - 9 only - // Tolerate NodeList properties (IE: "length"; Safari: ) matching - // elements by id. (see trac-14142) - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - push.call( results, elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - jQuery.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -function compile( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -} - -/** - * A low-level selection function that works with jQuery's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with jQuery selector 
compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -function select( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find.ID( - token.matches[ 0 ].replace( runescape, funescape ), - context - ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr.needsContext.test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[ i ]; - - // Abort if we hit a combinator - if ( Expr.relative[ ( type = token.type ) ] ) { - break; - } - if ( ( find = Expr.find[ type ] ) ) { - - // Search, expanding context for leading sibling combinators - if ( ( seed = find( - token.matches[ 0 ].replace( runescape, funescape ), - rsibling.test( tokens[ 0 ].type ) && - testContext( context.parentNode ) || context - ) ) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -} - -// One-time assignments - -// Support: Android <=4.0 - 4.1+ -// Sort stability -support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; - -// Initialize against the default document -setDocument(); - -// Support: Android <=4.0 - 4.1+ -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert( function( el ) { - - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; -} ); - -jQuery.find = find; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.unique = jQuery.uniqueSort; - -// These have always been private, but they used to be documented as part of -// Sizzle so let's maintain them for now for backwards compatibility purposes. 
-find.compile = compile; -find.select = select; -find.setDocument = setDocument; -find.tokenize = tokenize; - -find.escape = jQuery.escapeSelector; -find.getText = jQuery.text; -find.isXML = jQuery.isXMLDoc; -find.selectors = jQuery.expr; -find.support = jQuery.support; -find.uniqueSort = jQuery.uniqueSort; - - /* eslint-enable */ - -} )(); - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? 
[ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (trac-9521) - // Strict HTML recognition (trac-11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? 
context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to jQuery#find - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, _i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, _i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, _i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( elem.contentDocument != null && - - // Support: IE 11+ - // elements with no `data` attribute has an object - // `contentDocument` with a `null` prototype. 
- getProto( elem.contentDocument ) ) { - - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. 
- * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? - createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future 
add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? 
- jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - 
resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } 
else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? [ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = 
undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.error ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the error, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getErrorHook ) { - process.error = jQuery.Deferred.getErrorHook(); - - // The deprecated alias of the above. While the name suggests - // returning the stack, not an error instance, jQuery just passes - // it directly to `console.warn` so both will work; an instance - // just better cooperates with source maps. - } else if ( jQuery.Deferred.getStackHook ) { - process.error = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... ) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? 
- onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! 
- return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the primary Deferred - primary = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - primary.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( primary.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return primary.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); - } - - return primary.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. -var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -// If `jQuery.Deferred.getErrorHook` is defined, `asyncError` is an error -// captured before the async barrier to get the original error cause -// which may otherwise be hidden. 
-jQuery.Deferred.exceptionHook = function( error, asyncError ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, - error.stack, asyncError ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See trac-6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. 
-// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? - value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? 
fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (trac-9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see trac-8335. - // Always return an empty object. 
- if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. 
A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (trac-14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( 
arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? - this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. 
- if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). 
- jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && 
isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (trac-11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (trac-14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; - - // 
Support: IE <=9 only - // IE <=9 replaces "; - support.option = !!div.lastChild; -} )(); - - -// We have to close these tags to support XHTML (trac-13200) -var wrapMap = { - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "", "
    " ], - col: [ 2, "", "
    " ], - tr: [ 2, "", "
    " ], - td: [ 3, "", "
    " ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (trac-15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (trac-12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector 
!== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." 
).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( 
elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - 
var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (trac-13208) - // Don't process clicks on disabled elements (trac-6911, trac-8165, trac-11382, trac-11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (trac-13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? 
- jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", true ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. 
- // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. 
-function leverageNative( el, type, isSetup ) { - - // Missing `isSetup` indicates a trigger call, which must force setup through jQuery.event.add - if ( !isSetup ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - if ( !saved ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. - saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - this[ type ](); - result = dataPriv.get( this, type ); - dataPriv.set( this, type, false ); - - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - - return result; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering - // the native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. 
- } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved ) { - - // ...and capture the result - dataPriv.set( this, type, jQuery.event.trigger( - saved[ 0 ], - saved.slice( 1 ), - this - ) ); - - // Abort handling of the native event by all jQuery handlers while allowing - // native handlers on the same element to run. On target, this is achieved - // by stopping immediate propagation just on the jQuery event. However, - // the native event is re-wrapped by a jQuery one on each level of the - // propagation so the only way to stop it for jQuery is to stop it for - // everyone via native `stopPropagation()`. This is not a problem for - // focus/blur which don't bubble, but it does also stop click on checkboxes - // and radios. We accept this limitation. - event.stopPropagation(); - event.isImmediatePropagationStopped = returnTrue; - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? 
- returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (trac-504, trac-13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? - src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: 
true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - which: true -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - - function focusMappedHandler( nativeEvent ) { - if ( document.documentMode ) { - - // Support: IE 11+ - // Attach a single focusin/focusout handler on the document while someone wants - // focus/blur. This is because the former are synchronous in IE while the latter - // are async. In other browsers, all those handlers are invoked synchronously. - - // `handle` from private data would already wrap the event, but we need - // to change the `type` here. - var handle = dataPriv.get( this, "handle" ), - event = jQuery.event.fix( nativeEvent ); - event.type = nativeEvent.type === "focusin" ? "focus" : "blur"; - event.isSimulated = true; - - // First, handle focusin/focusout - handle( nativeEvent ); - - // ...then, handle focus/blur - // - // focus/blur don't bubble while focusin/focusout do; simulate the former by only - // invoking the handler at the lower level. - if ( event.target === event.currentTarget ) { - - // The setup part calls `leverageNative`, which, in turn, calls - // `jQuery.event.add`, so event handle will already have been set - // by this point. - handle( event ); - } - } else { - - // For non-IE browsers, attach a single capturing handler on the document - // while someone wants focusin/focusout. - jQuery.event.simulate( delegateType, nativeEvent.target, - jQuery.event.fix( nativeEvent ) ); - } - } - - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - var attaches; - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... 
) - leverageNative( this, type, true ); - - if ( document.documentMode ) { - - // Support: IE 9 - 11+ - // We use the same native handler for focusin & focus (and focusout & blur) - // so we need to coordinate setup & teardown parts between those events. - // Use `delegateType` as the key as `type` is already used by `leverageNative`. - attaches = dataPriv.get( this, delegateType ); - if ( !attaches ) { - this.addEventListener( delegateType, focusMappedHandler ); - } - dataPriv.set( this, delegateType, ( attaches || 0 ) + 1 ); - } else { - - // Return false to allow normal processing in the caller - return false; - } - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - teardown: function() { - var attaches; - - if ( document.documentMode ) { - attaches = dataPriv.get( this, delegateType ) - 1; - if ( !attaches ) { - this.removeEventListener( delegateType, focusMappedHandler ); - dataPriv.remove( this, delegateType ); - } else { - dataPriv.set( this, delegateType, attaches ); - } - } else { - - // Return false to indicate standard teardown should be applied - return false; - } - }, - - // Suppress native focus or blur if we're currently inside - // a leveraged native-event stack - _default: function( event ) { - return dataPriv.get( event.target, type ); - }, - - delegateType: delegateType - }; - - // Support: Firefox <=44 - // Firefox doesn't have focus(in | out) events - // Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 - // - // Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 - // focus(in | out) events fire after focus & blur events, - // which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order - // Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 - // - // Support: IE 9 - 11+ - // To preserve relative focusin/focus & focusout/blur event order 
guaranteed on the 3.x branch, - // attach a single handler for both events in IE. - jQuery.event.special[ delegateType ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - dataHolder = document.documentMode ? this : doc, - attaches = dataPriv.get( dataHolder, delegateType ); - - // Support: IE 9 - 11+ - // We use the same native handler for focusin & focus (and focusout & blur) - // so we need to coordinate setup & teardown parts between those events. - // Use `delegateType` as the key as `type` is already used by `leverageNative`. - if ( !attaches ) { - if ( document.documentMode ) { - this.addEventListener( delegateType, focusMappedHandler ); - } else { - doc.addEventListener( type, focusMappedHandler, true ); - } - } - dataPriv.set( dataHolder, delegateType, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - dataHolder = document.documentMode ? this : doc, - attaches = dataPriv.get( dataHolder, delegateType ) - 1; - - if ( !attaches ) { - if ( document.documentMode ) { - this.removeEventListener( delegateType, focusMappedHandler ); - } else { - doc.removeEventListener( type, focusMappedHandler, true ); - } - dataPriv.remove( dataHolder, delegateType ); - } else { - dataPriv.set( dataHolder, delegateType, attaches ); - } - } - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). 
-jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. 
- // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (trac-8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Re-enable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - - // Unwrap a CDATA section containing script contents. This shouldn't be - // needed as in XML documents they're already not visible when - // inspecting element contents and in HTML documents they have no - // meaning but we're preserving that logic for backwards compatibility. - // This will be removed completely in 4.0. See gh-4904. - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew jQuery#find here for performance reasons: - // https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - 
jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType 
=== 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - 
jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var rcustomProp = /^--/; - - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (trac-15098, trac-14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - 
// Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (trac-8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! - // - // Support: Firefox 70+ - // Only Firefox includes border widths - // in computed dimensions. (gh-4529) - reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; - tr.style.cssText = "box-sizing:content-box;border:1px solid"; - - // Support: Chrome 86+ - // Height set through cssText does not get applied. - // Computed height then comes back as 0. - tr.style.height = "1px"; - trChild.style.height = "9px"; - - // Support: Android 8 Chrome 86+ - // In our bodyBackground.html iframe, - // display for all div elements is set to "inline", - // which causes a problem only in Android 8 Chrome 86. 
- // Ensuring the div is `display: block` - // gets around this issue. - trChild.style.display = "block"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + - parseInt( trStyle.borderTopWidth, 10 ) + - parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - isCustomProp = rcustomProp.test( name ), - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, trac-12537) - // .css('--customProperty) (gh-3144) - if ( computed ) { - - // Support: IE <=9 - 11+ - // IE only supports `"float"` in `getPropertyValue`; in computed styles - // it's only available as `"cssFloat"`. We no longer modify properties - // sent to `.css()` apart from camelCasing, so we need to check both. - // Normally, this would create difference in behavior: if - // `getPropertyValue` returns an empty string, the value returned - // by `.css()` would be `undefined`. This is usually the case for - // disconnected elements. However, in IE even disconnected elements - // with no styles return `"none"` for `getPropertyValue( "float" )` - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( isCustomProp && ret ) { - - // Support: Firefox 105+, Chrome <=105+ - // Spec requires trimming whitespace for custom properties (gh-4926). - // Firefox only trims leading whitespace. Chrome just collapses - // both leading & trailing whitespace to a single space. - // - // Fall back to `undefined` if empty string returned. 
- // This collapses a missing definition with property defined - // and set to an empty string but there's no standard API - // allowing us to differentiate them without a performance penalty - // and returning `undefined` aligns with older jQuery. - // - // rtrimCSS treats U+000D CARRIAGE RETURN and U+000C FORM FEED - // as whitespace while CSS does not, but this is not a problem - // because CSS preprocessing replaces them with U+000A LINE FEED - // (which *is* CSS whitespace) - // https://www.w3.org/TR/css-syntax-3/#input-preprocessing - ret = ret.replace( rtrimCSS, "$1" ) || undefined; - } - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0, - marginDelta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - // Count margin delta separately to only add it after scroll gutter adjustment. - // This is needed to make negative margins work with `outerHeight( true )` (gh-3982). - if ( box === "margin" ) { - marginDelta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta + marginDelta; -} - 
-function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. - !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. 
- // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - animationIterationCount: true, - aspectRatio: true, - borderImageSlice: true, - columnCount: true, - flexGrow: true, - flexShrink: true, - fontWeight: true, - gridArea: true, - gridColumn: true, - gridColumnEnd: true, - gridColumnStart: true, - gridRow: true, - gridRowEnd: true, - gridRowStart: true, - lineHeight: true, - opacity: true, - order: true, - orphans: true, - scale: true, - widows: true, - zIndex: true, - zoom: true, - - // SVG-related - fillOpacity: true, - floodOpacity: true, - stopOpacity: true, - strokeMiterlimit: true, - strokeOpacity: true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're 
working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (trac-7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug trac-9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (trac-7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? 
num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? 
- boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? 
value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. 
- if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? 
"hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? 
"" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. - // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (trac-12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return 
remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - 
- prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - - doAnimation.finish = doAnimation; - - return empty || 
optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - 
jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof 
elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - 
attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // Use proper attribute retrieval (trac-12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - 
return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classNames, cur, curValue, className, i, finalValue; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classNames = 
classesToArray( value ); - - if ( classNames.length ) { - return this.each( function() { - curValue = getClass( this ); - cur = this.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - for ( i = 0; i < classNames.length; i++ ) { - className = classNames[ i ]; - if ( cur.indexOf( " " + className + " " ) < 0 ) { - cur += className + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - this.setAttribute( "class", finalValue ); - } - } - } ); - } - - return this; - }, - - removeClass: function( value ) { - var classNames, cur, curValue, className, i, finalValue; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classNames = classesToArray( value ); - - if ( classNames.length ) { - return this.each( function() { - curValue = getClass( this ); - - // This expression is here for better compressibility (see addClass) - cur = this.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - for ( i = 0; i < classNames.length; i++ ) { - className = classNames[ i ]; - - // Remove *all* instances - while ( cur.indexOf( " " + className + " " ) > -1 ) { - cur = cur.replace( " " + className + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. 
- finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - this.setAttribute( "class", finalValue ); - } - } - } ); - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var classNames, className, i, self, - type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? this.addClass( value ) : this.removeClass( value ); - } - - classNames = classesToArray( value ); - - return this.each( function() { - if ( isValidValue ) { - - // Toggle individual class names - self = jQuery( this ); - - for ( i = 0; i < classNames.length; i++ ) { - className = classNames[ i ]; - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? 
- "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (trac-14686, trac-14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (trac-2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked 
= jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml, parserErrorElem; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. - try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) {} - - parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; - if ( !xml || parserErrorElem ) { - jQuery.error( "Invalid XML: " + ( - parserErrorElem ? - jQuery.map( parserErrorElem.childNodes, function( el ) { - return el.textContent; - } ).join( "\n" ) : - data - ) ); - } - return xml; -}; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." 
); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (trac-9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (trac-9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? 
- bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. - // Don't do default actions on window, that's where global variables be (trac-6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - 
jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. 
- if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? jQuery.makeArray( elements ) : this; - } ).filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ).map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // trac-7653, trac-8125, trac-8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is 
a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (trac-10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - -originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = 
prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes trac-9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = 
type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] 
|| - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse 
text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? - - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? 
- jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 
0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (trac-10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket trac-12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (trac-15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new 
set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // trac-9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion 
exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script but not if jsonp - if ( !isSuccess && - jQuery.inArray( "script", s.dataTypes ) > -1 && - jQuery.inArray( "json", s.dataTypes ) < 0 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? 
success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (trac-11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? 
html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // trac-1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see trac-8605, trac-14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // trac-14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack 
transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - """, - """ - """, """ const pathtoroot = "./"; loadScripts(); @@ -437,6 +429,10 @@ public class TestSearch extends JavadocTester { holder="Search documentation (type /)" aria-label="Search in documentation" auto\ complete="off" spellcheck="false">"""); + checkOutput(fileName, false, + "jquery-ui.min.css", + "jquery-3.7.1.min.js", + "jquery-ui.min.js"); } void checkSingleIndex() { @@ -669,14 +665,15 @@ public class TestSearch extends JavadocTester { "AnotherClass.java:68: warning: invalid usage of tag {@index"); } - void checkJqueryAndImageFiles(boolean expectedOutput) { + void checkImageFiles(boolean expectedOutput) { checkFiles(expectedOutput, "script-files/search.js", - "script-files/jquery-3.7.1.min.js", - "script-files/jquery-ui.min.js", - "resource-files/jquery-ui.min.css", "resource-files/x.svg", "resource-files/glass.svg"); + checkFiles(false, + "script-files/jquery-3.7.1.min.js", + "script-files/jquery-ui.min.js", + "resource-files/jquery-ui.min.css"); } void checkSearchJS() { @@ -689,9 +686,7 @@ public class TestSearch extends JavadocTester { "function getURLPrefix(item, category) {", "url += item.l;"); - checkOutput("script-files/search-page.js", true, - "function renderResults(result) {", - "function selectTab(category) {"); + checkFiles(false, "script-files/search-page.js"); checkCssClasses("script-files/search.js", "resource-files/stylesheet.css"); } @@ -701,8 +696,8 @@ public class TestSearch extends JavadocTester { // are also defined as class selectors somewhere in the stylesheet file. 
String js = readOutputFile(jsFile); Set cssClasses = new TreeSet<>(); - addMatches(js, Pattern.compile("class=\\\\*\"([^\\\\\"]+)\\\\*\""), cssClasses); - addMatches(js, Pattern.compile("attr\\(\"class\", \"([^\"]+)\"\\)"), cssClasses); + addMatches(js, Pattern.compile("class=[\"']([-\\w]+)[\"']"), cssClasses); + addMatches(js, Pattern.compile("classList.add\\([\"']([-\\w]+)[\"']\\)"), cssClasses); // verify that the regex did find use of CSS class names checking("Checking CSS classes found"); if (cssClasses.isEmpty()) { diff --git a/test/langtools/jdk/javadoc/doclet/testSeeTag/TestSeeTag.java b/test/langtools/jdk/javadoc/doclet/testSeeTag/TestSeeTag.java index f3294f6647c..57854396cf4 100644 --- a/test/langtools/jdk/javadoc/doclet/testSeeTag/TestSeeTag.java +++ b/test/langtools/jdk/javadoc/doclet/testSeeTag/TestSeeTag.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @bug 8017191 8182765 8200432 8239804 8250766 8262992 8281944 8307377 + * 8284315 * @summary Javadoc is confused by at-link to imported classes outside of the set of generated packages * @library /tools/lib ../../lib * @modules jdk.javadoc/jdk.javadoc.internal.tool @@ -103,7 +104,12 @@ public class TestSeeTag extends JavadocTester {
    See Also:
      -
    • Object
    • +
    • +
      + invalid reference +
      Object[]
      +
      +
    • invalid reference diff --git a/test/langtools/jdk/javadoc/doclet/testSerializedFormWithClassFile/TestSerializedFormWithClassFile.java b/test/langtools/jdk/javadoc/doclet/testSerializedFormWithClassFile/TestSerializedFormWithClassFile.java index 25613dffd95..1db0b6af8d4 100644 --- a/test/langtools/jdk/javadoc/doclet/testSerializedFormWithClassFile/TestSerializedFormWithClassFile.java +++ b/test/langtools/jdk/javadoc/doclet/testSerializedFormWithClassFile/TestSerializedFormWithClassFile.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -106,8 +106,8 @@ public class TestSerializedFormWithClassFile extends JavadocTester { new JavacTask(tb).files(srcDir.resolve("A.java")).outdir(classes).run(); new ClassBuilder(tb, "B") - .setExtends("A") .setModifiers("public", "class") + .setExtends("A") .write(srcDir); } } diff --git a/test/langtools/jdk/javadoc/doclet/testStylesheet/TestStylesheet.java b/test/langtools/jdk/javadoc/doclet/testStylesheet/TestStylesheet.java index 012e9ce00de..a2c2a603212 100644 --- a/test/langtools/jdk/javadoc/doclet/testStylesheet/TestStylesheet.java +++ b/test/langtools/jdk/javadoc/doclet/testStylesheet/TestStylesheet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -132,6 +132,7 @@ public class TestStylesheet extends JavadocTester { min-height:12px; font-size:0; visibility:hidden; + cursor: pointer; }""", """ ::placeholder { diff --git a/test/langtools/jdk/javadoc/doclet/testVisibleMembers/TestVisibleMembers.java b/test/langtools/jdk/javadoc/doclet/testVisibleMembers/TestVisibleMembers.java index 09f6f92e2b6..fe5f1212b09 100644 --- a/test/langtools/jdk/javadoc/doclet/testVisibleMembers/TestVisibleMembers.java +++ b/test/langtools/jdk/javadoc/doclet/testVisibleMembers/TestVisibleMembers.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -135,7 +135,7 @@ public class TestVisibleMembers extends JavadocTester { "@param lvalue an lvalue", "@return something"); new ClassBuilder(tb, "p.B") - .setModifiers( "public", "interface") + .setModifiers("public", "interface") .setExtends("A") .addMembers(mbWith1, mbWith2) .write(srcDir); @@ -358,7 +358,7 @@ public class TestVisibleMembers extends JavadocTester { MethodBuilder.parse("public I sub() {return null;}"), MethodBuilder.parse("public I sub1() {return null;}") .setComments(Kind.INHERIT_DOC), - MethodBuilder.parse(" public void method() {}") + MethodBuilder.parse("public void method() {}") .setComments("A method ", "@see #sub", "@see #sub1"), MethodBuilder.parse("public int length(){return 1;}") .setComments(Kind.NO_API_COMMENT) @@ -380,7 +380,7 @@ public class TestVisibleMembers extends JavadocTester { ).write(srcDir); new ClassBuilder(tb, "p.QLong") - .setModifiers("public interface") + .setModifiers("public", "interface") .addMembers( MethodBuilder.parse("default void forEach(Q action) {}") ).write(srcDir); @@ -663,7 +663,7 @@ public 
class TestVisibleMembers extends JavadocTester { ).write(srcDir); new ClassBuilder(tb, "p.I3") - .setExtends("I1, I2") + .addImplements("I1", "I2") .setModifiers("public", "interface") .addMembers( FieldBuilder.parse("public static int field = 3;"), @@ -677,8 +677,8 @@ public class TestVisibleMembers extends JavadocTester { .write(srcDir); new ClassBuilder(tb, "p.C2") - .setExtends("C1") .setModifiers("public", "abstract", "class") + .setExtends("C1") .addMembers( FieldBuilder.parse("public int field;"), MethodBuilder.parse("public void method(){}"), diff --git a/test/langtools/jdk/javadoc/taglet/JdkTaglets.java b/test/langtools/jdk/javadoc/taglet/JdkTaglets.java new file mode 100644 index 00000000000..537904450aa --- /dev/null +++ b/test/langtools/jdk/javadoc/taglet/JdkTaglets.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.stream.Stream; + +import toolbox.JavacTask; +import toolbox.ToolBox; + +/// Utilities to build the JDK-specific taglets. +/// This guy uses JavacTask so can't be in javadoc.tester. +public final class JdkTaglets { + + /// Build a taglet and return its path for `-tagletpath`. + public static Path build(ToolBox tb, Path base, String... tagletFiles) throws IOException { + Path tagletOutDir = base.resolve("tagletClasses"); + Files.createDirectories(tagletOutDir); + tb.cleanDirectory(tagletOutDir); + Path tagletRoot = tb.findFromTestRoot("../../make/jdk/src/classes/build/tools/taglet"); + + new JavacTask(tb) + .files(Stream.of(tagletFiles) + .map(tagletFile -> tagletRoot.resolve(tagletFile + ".java")) + .toArray(Path[]::new)) + .outdir(tagletOutDir) + .run(JavacTask.Expect.SUCCESS); + return tagletOutDir; + } + + private JdkTaglets() {} +} diff --git a/test/langtools/jdk/javadoc/taglet/sealedGraph/TestSealedTaglet.java b/test/langtools/jdk/javadoc/taglet/sealedGraph/TestSealedTaglet.java new file mode 100644 index 00000000000..3ac6f0601b2 --- /dev/null +++ b/test/langtools/jdk/javadoc/taglet/sealedGraph/TestSealedTaglet.java @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary General tests for SealedGraph block tag + * @bug 8380913 + * @library /tools/lib /jdk/javadoc/lib ../ + * @modules jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * jdk.javadoc/jdk.javadoc.internal.tool + * @build javadoc.tester.* toolbox.ToolBox builder.ClassBuilder JdkTaglets + * @run main ${test.main.class} + */ + +import java.nio.file.Path; + +import builder.ClassBuilder; +import javadoc.tester.JavadocTester; +import toolbox.ToolBox; + +public class TestSealedTaglet extends JavadocTester { + + final ToolBox tb; + final Path tagletPath; + + public static void main(String... 
args) throws Exception { + var tester = new TestSealedTaglet(); + tester.runTests(); + } + + TestSealedTaglet() throws Exception { + tb = new ToolBox(); + tagletPath = JdkTaglets.build(tb, Path.of(""), "SealedGraph"); + setAutomaticCheckLinks(false); // Don't fail for missing svg + } + + @Test + public void testInvisibleInMiddle(Path base) throws Exception { + Path srcDir = base.resolve("src"); + Path outDir = base.resolve("out"); + + tb.writeFile(srcDir.resolve("module-info.java"), + """ + module test { + exports pkg; + } + """); + new ClassBuilder(tb, "pkg.A") + .setModifiers("public", "abstract", "sealed", "interface") + .setComments("@sealedGraph") + .addPermits("pkg.B") + .write(srcDir); + new ClassBuilder(tb, "pkg.B") + .setModifiers("abstract", "sealed", "interface") + .addImplements("pkg.A") + .addPermits("pkg.C", "pkg.D") + .write(srcDir); + new ClassBuilder(tb, "pkg.C") + .setModifiers("abstract", "sealed", "interface") + .addImplements("pkg.A", "pkg.B") + .addPermits("pkg.D") + .write(srcDir); + new ClassBuilder(tb, "pkg.D") + .setModifiers("public", "final", "class") + .addImplements("pkg.B", "pkg.C") + .write(srcDir); + + System.setProperty("sealedDotOutputDir", outDir.toString()); + + javadoc("-tagletpath", tagletPath.toString(), + "-taglet", "build.tools.taglet.SealedGraph", + "-d", outDir.toString(), + "-sourcepath", srcDir.toString(), + "pkg"); + + checkExit(Exit.OK); + // D is displayed as a direct subtype of A, bypassing B, C, one link only + checkUnique("test_pkg.A.dot", "\"pkg.D\" -> \"pkg.A\";"); + } +} diff --git a/test/langtools/jdk/javadoc/tool/api/basic/APITest.java b/test/langtools/jdk/javadoc/tool/api/basic/APITest.java index 71908f34e99..bd8d0cf7953 100644 --- a/test/langtools/jdk/javadoc/tool/api/basic/APITest.java +++ b/test/langtools/jdk/javadoc/tool/api/basic/APITest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -207,7 +207,6 @@ class APITest { "resource-files/copy.svg", "resource-files/down.svg", "resource-files/glass.svg", - "resource-files/jquery-ui.min.css", "resource-files/left.svg", "resource-files/link.svg", "resource-files/moon.svg", @@ -241,11 +240,8 @@ class APITest { "resource-files/fonts/DejaVuLGCSerif-Italic.woff2", "resource-files/fonts/DejaVuLGCSerif.woff", "resource-files/fonts/DejaVuLGCSerif.woff2", - "script-files/jquery-3.7.1.min.js", - "script-files/jquery-ui.min.js", "script-files/script.js", "script-files/search.js", - "script-files/search-page.js", "tag-search-index.js", "type-search-index.js" )); @@ -255,11 +251,8 @@ class APITest { !s.endsWith("-search-index.js") && !s.equals("index-all.html") && !s.equals("resource-files/glass.svg") - && !s.equals("resource-files/jquery-ui.min.css") && !s.equals("resource-files/x.svg") - && !s.startsWith("script-files/jquery-") && !s.equals("script-files/search.js") - && !s.equals("script-files/search-page.js") && !s.equals("search.html") && !s.equals("allclasses-index.html") && !s.equals("allpackages-index.html") diff --git a/test/langtools/jdk/jshell/ConcurrentHistoryLoadingTest.java b/test/langtools/jdk/jshell/ConcurrentHistoryLoadingTest.java new file mode 100644 index 00000000000..926dbbf50b4 --- /dev/null +++ b/test/langtools/jdk/jshell/ConcurrentHistoryLoadingTest.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.io.ByteArrayInputStream; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.prefs.Preferences; + +import jdk.jshell.tool.JavaShellToolBuilder; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +/* + * @test + * @bug 8347418 + * @summary Verify that loading of JShell history doesn't lead to a + * NullPointerException when the Preferences are modified concurrently. 
+ * @run junit ConcurrentHistoryLoadingTest + */ +public class ConcurrentHistoryLoadingTest { + + private static final String HISTORY_LINE_PREFIX = "HISTORY_LINE_"; + + @Test + public void testConcurrentHistoryLoading() throws Throwable { + AtomicBoolean removeOnAccess = new AtomicBoolean(); + Preferences testPrefs = new ReplToolTesting.MemoryPreferences() { + @Override + protected String getSpi(String key) { + String result = super.getSpi(key); + if (key.startsWith(HISTORY_LINE_PREFIX) && removeOnAccess.getAndSet(false)) { + for (String key2Remote : keysSpi()) { + remove(key2Remote); + } + } + return result; + } + }; + StringBuilder input = new StringBuilder(); + int max = 10; + for (int j = 0; j < max; j++) { + input.append("int x").append(j).append(" = 42\n"); + } + JavaShellToolBuilder + .builder() + .persistence(testPrefs) + .in(new ByteArrayInputStream(input.toString().getBytes()), null) + .start(); + Assertions.assertEquals(10, Arrays.stream(testPrefs.keys()) + .filter(key -> key.startsWith(HISTORY_LINE_PREFIX)) + .count()); + removeOnAccess.set(true); + JavaShellToolBuilder + .builder() + .persistence(testPrefs) + .in(new ByteArrayInputStream(input.toString().getBytes()), null) + .start(); + + } +} diff --git a/test/langtools/jdk/jshell/InputUITest.java b/test/langtools/jdk/jshell/InputUITest.java index 1a420d2c345..6886e1302a2 100644 --- a/test/langtools/jdk/jshell/InputUITest.java +++ b/test/langtools/jdk/jshell/InputUITest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* * @test - * @bug 8356165 8358552 + * @bug 8356165 8358552 8378251 * @summary Check user input works properly * @modules * jdk.compiler/com.sun.tools.javac.api @@ -99,4 +99,34 @@ public class InputUITest extends UITesting { waitOutput(out, patternQuote("==> 65")); }, false); } -} \ No newline at end of file + + @Test + public void testAltBackspaceDeletesPreviousWord() throws Exception { + doRunTest((inputSink, out) -> { + inputSink.write("int x = 12 24" + ESC_DEL + "\n"); + waitOutput(out, "int x = 12 24\u001B\\[2D\u001B\\[K\n" + + "\u001B\\[\\?2004lx ==> 12\n" + + "\u001B\\[\\?2004h" + PROMPT); + inputSink.write("System.in" + ESC_DEL + "out.println(x)\n"); + waitOutput(out, "System.in\u001B\\[2D\u001B\\[Kout.println\\(x\\)\u001B\\[3D\u001B\\[3C\n" + + "\u001B\\[\\?2004l12\n" + + "\u001B\\[\\?2004h" + PROMPT); + }, false); + } + + @Test + public void testAltDDeletesNextWord() throws Exception { + doRunTest((inputSink, out) -> { + inputSink.write("int x = 12 24" + ESC_B + ESC_D + "\n"); + waitOutput(out, "int x = 12 24\u001B\\[2D\u001B\\[K\n" + + "\u001B\\[\\?2004lx ==> 12\n" + + "\u001B\\[\\?2004h" + PROMPT); + inputSink.write("System.in.println" + ESC_B + ESC_B + ESC_D + + "out" + ESC_F + ESC_F + "(x)\n"); + waitOutput(out, "System.in.println\u001B\\[7D\u001B\\[3D\u001B\\[2P" + + "\u001B\\[1@o\u001B\\[1@u\u001B\\[1@t\u001B\\[C\u001B\\[7C\\(x\\)\u001B\\[3D\u001B\\[3C\n" + + "\u001B\\[\\?2004l12\n" + + "\u001B\\[\\?2004h" + PROMPT); + }, false); + } +} diff --git a/test/langtools/jdk/jshell/ReplToolTesting.java b/test/langtools/jdk/jshell/ReplToolTesting.java index 429a0a7ce02..2dabf29e1f9 100644 --- a/test/langtools/jdk/jshell/ReplToolTesting.java +++ b/test/langtools/jdk/jshell/ReplToolTesting.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -914,7 +914,7 @@ public class ReplToolTesting { } } - public static final class MemoryPreferences extends AbstractPreferences { + public static class MemoryPreferences extends AbstractPreferences { private final Map values = new HashMap<>(); private final Map nodes = new HashMap<>(); @@ -943,17 +943,17 @@ public class ReplToolTesting { } @Override - protected void removeNodeSpi() throws BackingStoreException { + protected void removeNodeSpi() { ((MemoryPreferences) parent()).nodes.remove(name()); } @Override - protected String[] keysSpi() throws BackingStoreException { + protected String[] keysSpi() { return values.keySet().toArray(new String[0]); } @Override - protected String[] childrenNamesSpi() throws BackingStoreException { + protected String[] childrenNamesSpi() { return nodes.keySet().toArray(new String[0]); } @@ -963,11 +963,11 @@ public class ReplToolTesting { } @Override - protected void syncSpi() throws BackingStoreException { + protected void syncSpi() { } @Override - protected void flushSpi() throws BackingStoreException { + protected void flushSpi() { } } diff --git a/test/langtools/jdk/jshell/TerminalNoExecTest.java b/test/langtools/jdk/jshell/TerminalNoExecTest.java index 3d76157fd26..d7cd20046af 100644 --- a/test/langtools/jdk/jshell/TerminalNoExecTest.java +++ b/test/langtools/jdk/jshell/TerminalNoExecTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ import java.io.Writer; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.concurrent.TimeUnit; +import java.util.HashMap; import java.util.concurrent.atomic.AtomicBoolean; import jdk.jfr.consumer.RecordingStream; import jdk.jshell.tool.JavaShellToolBuilder; @@ -58,7 +58,9 @@ public class TerminalNoExecTest { spawnedNewProcess.set(true); }); rs.startAsync(); - JavaShellToolBuilder.builder().run("--execution=local", "--no-startup"); + JavaShellToolBuilder.builder() + .persistence(new HashMap<>()) + .run("--execution=local", "--no-startup"); rs.stop(); } if (spawnedNewProcess.get()) { diff --git a/test/langtools/jdk/jshell/UITesting.java b/test/langtools/jdk/jshell/UITesting.java index a1bd8f35dee..d63a8460889 100644 --- a/test/langtools/jdk/jshell/UITesting.java +++ b/test/langtools/jdk/jshell/UITesting.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -53,6 +53,10 @@ public class UITesting { protected static final String UP = "\033[A"; protected static final String DOWN = "\033[B"; protected static final String CTRL_D = "\u0004"; + protected static final String ESC_DEL = "\u001B\u007F"; // ESC + DEL (common Alt+Backspace) + protected static final String ESC_B = "\u001Bb"; // ESC + b (common Alt+b) + protected static final String ESC_F = "\u001Bf"; // ESC + f (common Alt+f) + protected static final String ESC_D = "\u001Bd"; // ESC + d (common Alt+d) private final boolean laxLineEndings; public UITesting() { diff --git a/test/langtools/lib/combo/tools/javac/combo/CompilationTestCase.java b/test/langtools/lib/combo/tools/javac/combo/CompilationTestCase.java index 0760352e23b..c3aa92d1f37 100644 --- a/test/langtools/lib/combo/tools/javac/combo/CompilationTestCase.java +++ b/test/langtools/lib/combo/tools/javac/combo/CompilationTestCase.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -112,6 +112,10 @@ public class CompilationTestCase extends JavacTemplateTestBase { assertCompile(expandMarkers(constructs), () -> assertCompileSucceededWithWarning(warning), false); } + protected void assertOKWithWarning(String warning, int numberOfTimes, String... constructs) { + assertCompile(expandMarkers(constructs), () -> assertCompileSucceededWithWarning(warning, numberOfTimes), false); + } + protected void assertFail(String expectedDiag, String... 
constructs) { assertCompile(expandMarkers(constructs), () -> assertCompileFailed(expectedDiag), false); } diff --git a/test/langtools/lib/combo/tools/javac/combo/Diagnostics.java b/test/langtools/lib/combo/tools/javac/combo/Diagnostics.java index 47f496b5891..87ee54b375a 100644 --- a/test/langtools/lib/combo/tools/javac/combo/Diagnostics.java +++ b/test/langtools/lib/combo/tools/javac/combo/Diagnostics.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -84,6 +84,12 @@ public class Diagnostics implements javax.tools.DiagnosticListener d.getCode().equals(key)); } + public boolean containsWarningKey(String key, int numberOfWarnings) { + return diags.stream() + .filter(d -> d.getKind() == Diagnostic.Kind.WARNING || d.getKind() == Diagnostic.Kind.MANDATORY_WARNING) + .filter(d -> d.getCode().equals(key)).count() == numberOfWarnings; + } + /** Get the error keys */ public List errorKeys() { return diags.stream() diff --git a/test/langtools/lib/combo/tools/javac/combo/JavacTemplateTestBase.java b/test/langtools/lib/combo/tools/javac/combo/JavacTemplateTestBase.java index ed5936210a8..3b4b67e7968 100644 --- a/test/langtools/lib/combo/tools/javac/combo/JavacTemplateTestBase.java +++ b/test/langtools/lib/combo/tools/javac/combo/JavacTemplateTestBase.java @@ -154,6 +154,14 @@ public abstract class JavacTemplateTestBase { } } + protected void assertCompileSucceededWithWarning(String warning, int numberOfWarnings) { + if (diags.errorsFound()) + fail("Expected successful compilation"); + if (!diags.containsWarningKey(warning, numberOfWarnings)) { + fail(String.format("Expected compilation warning with %s, found %s", warning, diags.keys())); + } + } + /** * If the provided boolean is true, assert all previous 
compiles succeeded, * otherwise assert that a compile failed. diff --git a/test/langtools/tools/doclint/ReferenceTest.java b/test/langtools/tools/doclint/ReferenceTest.java index ca677d64927..63512d1efc4 100644 --- a/test/langtools/tools/doclint/ReferenceTest.java +++ b/test/langtools/tools/doclint/ReferenceTest.java @@ -1,6 +1,6 @@ /* * @test /nodynamiccopyright/ - * @bug 8004832 8020556 8002154 8200432 8177280 + * @bug 8004832 8020556 8002154 8200432 8177280 8284315 * @summary Add new doclint package * @modules jdk.javadoc/jdk.javadoc.internal.doclint * @build DocLintTester diff --git a/test/langtools/tools/doclint/ReferenceTest.out b/test/langtools/tools/doclint/ReferenceTest.out index 0b39ea20a41..4232f7b12ba 100644 --- a/test/langtools/tools/doclint/ReferenceTest.out +++ b/test/langtools/tools/doclint/ReferenceTest.out @@ -31,12 +31,24 @@ ReferenceTest.java:71: error: reference not found ReferenceTest.java:74: error: reference not found * @see not.Found ^ +ReferenceTest.java:79: error: reference not found + * {@link java.lang.String[]} + ^ +ReferenceTest.java:80: error: reference not found + * {@link java.lang.String[]#equals} + ^ ReferenceTest.java:81: error: reference not found * {@link not.Found[]} ^ +ReferenceTest.java:82: error: reference not found + * @see java.lang.String[] + ^ +ReferenceTest.java:83: error: reference not found + * @see java.lang.String[]#equals + ^ ReferenceTest.java:84: error: reference not found * @see not.Found[] ^ -12 errors +16 errors 1 warning diff --git a/test/langtools/tools/javac/TextBlockU2028.java b/test/langtools/tools/javac/TextBlockU2028.java new file mode 100644 index 00000000000..a7abfe43263 --- /dev/null +++ b/test/langtools/tools/javac/TextBlockU2028.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8380912 + * @summary Verify that trailing whitespace warning is not reported for \u2028 + * inside text block content + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit TextBlockU2028 + */ + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +import toolbox.JavacTask; +import toolbox.ToolBox; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +public class TextBlockU2028 { + Path base; + ToolBox tb = new ToolBox(); + + @Test + void testNoFalseTrailingWhitespaceWarning() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + new JavacTask(tb) + .options("-d", classes.toString(), "-Xlint:text-blocks", "-XDrawDiagnostics", "-Werror") + .sources(""" + public class Test { + String s = \"\"\" + foo \\u2028 bar + \"\"\"; + } + """) + .run() + .writeAll(); + } + + 
@BeforeEach + public void setUp(TestInfo info) { + base = Paths.get(".") + .resolve(info.getTestMethod() + .orElseThrow() + .getName()); + } +} diff --git a/test/langtools/tools/javac/annotations/6365854/test1.out b/test/langtools/tools/javac/annotations/6365854/test1.out index c8bf69b095d..e69de29bb2d 100644 --- a/test/langtools/tools/javac/annotations/6365854/test1.out +++ b/test/langtools/tools/javac/annotations/6365854/test1.out @@ -1,2 +0,0 @@ -TestCore.class:-:-: compiler.warn.annotation.method.not.found.reason: test.annotation.TestAnnotation, test, (compiler.misc.class.file.not.found: test.annotation.TestAnnotation) -1 warning diff --git a/test/langtools/tools/javac/annotations/8218152/MalformedAnnotationProcessorTests.java b/test/langtools/tools/javac/annotations/8218152/MalformedAnnotationProcessorTests.java index 68e4aea0ab2..85095cc5537 100644 --- a/test/langtools/tools/javac/annotations/8218152/MalformedAnnotationProcessorTests.java +++ b/test/langtools/tools/javac/annotations/8218152/MalformedAnnotationProcessorTests.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -91,8 +91,8 @@ public class MalformedAnnotationProcessorTests extends TestRunner{ .getOutputLines(Task.OutputKind.DIRECT); System.out.println(actualErrors.get(0)); - if (!actualErrors.get(0).contains("- compiler.err.proc.cant.load.class: " + - "Incompatible magic value")) { + if (!actualErrors.get(0).contains("- compiler.err.proc.bad.config.file: " + + "javax.annotation.processing.Processor: Provider BadAnnoProcessor not found")) { throw new AssertionError("Unexpected errors reported: " + actualErrors); } } @@ -162,8 +162,8 @@ public class MalformedAnnotationProcessorTests extends TestRunner{ .writeAll() .getOutputLines(Task.OutputKind.DIRECT); - if (!actualErrors.get(0).contains("- compiler.err.proc.cant.load.class: " + - "WrongClassFileVersion has been compiled by a more recent version")) { + if (!actualErrors.get(0).contains("- compiler.err.proc.bad.config.file: " + + "javax.annotation.processing.Processor: Provider WrongClassFileVersion not found")) { throw new AssertionError("Unexpected errors reported: " + actualErrors); } } diff --git a/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.java b/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.java index 7f5d49e38c9..d0c19c3ec09 100644 --- a/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.java +++ b/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.java @@ -3,7 +3,7 @@ * @summary compiler is crashing with AssertionError for annotations with unknown target type * @bug 8296010 * @build A - * @compile/fail/ref=CrashOnUnknownTargetTypeTest.out -XDrawDiagnostics CrashOnUnknownTargetTypeTest.java + * @compile/fail/ref=CrashOnUnknownTargetTypeTest.out -XDrawDiagnostics -Xlint:classfile CrashOnUnknownTargetTypeTest.java */ public class CrashOnUnknownTargetTypeTest { diff --git 
a/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.out b/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.out index 8e6925a0e0d..d46ea56acef 100644 --- a/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.out +++ b/test/langtools/tools/javac/annotations/crashOnUnknownAttr/CrashOnUnknownTargetTypeTest.out @@ -1,5 +1,5 @@ -- compiler.warn.unknown.enum.constant: ElementType.class, java.lang.annotation.ElementType, NO_SUCH -- compiler.warn.unknown.enum.constant: String.class, java.lang.annotation.ElementType, NO_SUCH +A.class:-:-: compiler.warn.unknown.enum.constant: ElementType.class, java.lang.annotation.ElementType, NO_SUCH +A.class:-:-: compiler.warn.unknown.enum.constant: String.class, java.lang.annotation.ElementType, NO_SUCH CrashOnUnknownTargetTypeTest.java:10:5: compiler.err.annotation.unrecognized.attribute.name: A, NO_SUCH CrashOnUnknownTargetTypeTest.java:11:5: compiler.err.annotation.unrecognized.attribute.name: A, NO_SUCH CrashOnUnknownTargetTypeTest.java:12:14: compiler.err.annotation.unrecognized.attribute.name: A, NO_SUCH diff --git a/test/langtools/tools/javac/annotations/repeatingAnnotations/generatedInRepeating/ComplexGeneratedInRepeating.java b/test/langtools/tools/javac/annotations/repeatingAnnotations/generatedInRepeating/ComplexGeneratedInRepeating.java new file mode 100644 index 00000000000..2ddb15a64ea --- /dev/null +++ b/test/langtools/tools/javac/annotations/repeatingAnnotations/generatedInRepeating/ComplexGeneratedInRepeating.java @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8378524 + * @summary Check that repeating annotations whose attributes are not-yet-generated classes and their members work. + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit ComplexGeneratedInRepeating + */ + +import java.io.IOException; +import java.io.Writer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.Set; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.element.TypeElement; +import toolbox.JavacTask; +import toolbox.Task; +import toolbox.ToolBox; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +public class ComplexGeneratedInRepeating { + + Path base; + ToolBox tb = new ToolBox(); + + @Test + void testMember() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + new JavacTask(tb) + .options("-d", classes.toString()) + .sources(""" + package test; + 
+ import java.lang.annotation.Repeatable; + + @Rep(Constants.C) + @Rep(Constants.C) + public class Test {} + + @Repeatable(Reps.class) + @interface Rep { + int value(); + } + @interface Reps { + Rep[] value(); + } + """) + .processors(new ProcessorImpl()) + .run() + .writeAll(); + } + + @Test + void testUnresolvableMember() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + List out = new JavacTask(tb) + .options("-d", classes.toString(), "-XDrawDiagnostics", "-nowarn") + .sources(""" + package test; + + import java.lang.annotation.Repeatable; + + @Rep(Constants.C) + @Rep(Constants.Unknown) + public class Test {} + + @Repeatable(Reps.class) + @interface Rep { + int value(); + } + @interface Reps { + Rep[] value(); + } + """) + .processors(new ProcessorImpl()) + .run(Task.Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + tb.checkEqual(out, List.of( + "Test.java:6:15: compiler.err.cant.resolve.location: kindname.variable, Unknown, , , (compiler.misc.location: kindname.class, test.Constants, null)", + "1 error")); + } + + @Test + void testIncompatibleMember() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + List out = new JavacTask(tb) + .options("-d", classes.toString(), "-XDrawDiagnostics", "-nowarn") + .sources(""" + package test; + + import java.lang.annotation.Repeatable; + + @Rep(Constants.C) + @Rep(Constants.S) + public class Test {} + + @Repeatable(Reps.class) + @interface Rep { + int value(); + } + @interface Reps { + Rep[] value(); + } + """) + .processors(new ProcessorImpl()) + .run(Task.Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + tb.checkEqual(out, List.of( + "Test.java:6:15: compiler.err.prob.found.req: (compiler.misc.inconvertible.types: java.lang.String, int)", + "1 error")); + } + + @Test + void testAnnotation() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); 
+ new JavacTask(tb) + .options("-d", classes.toString()) + .sources(""" + package test; + + import java.lang.annotation.Repeatable; + + @Rep(@Ann(Constants.C)) + @Rep(@Ann(Constants.C)) + public class Test {} + + @Repeatable(Reps.class) + @interface Rep { + Ann value(); + } + @interface Reps { + Rep[] value(); + } + """) + .processors(new ProcessorImpl()) + .run() + .writeAll(); + } + + @SupportedAnnotationTypes("*") + private static class ProcessorImpl extends AbstractProcessor { + + int round = 0; + + @Override + public boolean process(Set annotations, RoundEnvironment roundEnv) { + if (round++ == 0) { + try (Writer w = processingEnv.getFiler().createSourceFile("test.Constants").openWriter()) { + w.append(""" + package test; + public class Constants { + public static final int C = 0; + public static final String S = ""; + } + """); + } catch (IOException ex) { + throw new IllegalStateException(ex); + } + try (Writer w = processingEnv.getFiler().createSourceFile("test.Ann").openWriter()) { + w.append(""" + package test; + public @interface Ann { + int value(); + } + """); + } catch (IOException ex) { + throw new IllegalStateException(ex); + } + } + return false; + } + } + + @BeforeEach + public void setUp(TestInfo info) { + base = Paths.get(".") + .resolve(info.getTestMethod() + .orElseThrow() + .getName()); + } +} diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/IncorrectCastOffsetTest.java b/test/langtools/tools/javac/annotations/typeAnnotations/IncorrectCastOffsetTest.java new file mode 100644 index 00000000000..7a00cf1c897 --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/IncorrectCastOffsetTest.java @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8214934 8379201 + * @summary Wrong type annotation offset on casts on expressions + * @library /tools/lib + * @modules jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * jdk.jdeps/com.sun.tools.javap + * @build toolbox.ToolBox toolbox.JavapTask + * @run compile -g:none IncorrectCastOffsetTest.java + * @run main IncorrectCastOffsetTest + */ + +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import java.util.List; + +import toolbox.JavapTask; +import toolbox.Task; +import toolbox.ToolBox; + +public class IncorrectCastOffsetTest { + @Target(ElementType.TYPE_USE) + @Retention(RetentionPolicy.RUNTIME) + @interface TypeUse {} + + @Target(ElementType.TYPE_USE) + @Retention(RetentionPolicy.RUNTIME) + @interface TypeUse2 {} + + class AnnotatedCast1 { + private static String checkcast(boolean test, Object obj, Object obj2) { + return 
(@TypeUse String)(test ? obj : obj2); + } + } + + class AnnotatedCast2 { + private static String checkcast(Object obj) { + return (@TypeUse String)(obj); + } + } + + class AnnotatedCast3 { + private static String checkcast(boolean test, Object obj, Object obj2) { + return (@TypeUse @TypeUse2 String)(test ? obj : obj2); + } + } + + class AnnotatedCast4 { + private static String checkcast(Object obj) { + return (@TypeUse String)(@TypeUse2 CharSequence)(obj); + } + } + + ToolBox tb; + + IncorrectCastOffsetTest() { + tb = new ToolBox(); + } + + public static void main(String args[]) { + IncorrectCastOffsetTest incorrectCastOffsetTest = new IncorrectCastOffsetTest(); + incorrectCastOffsetTest.run(); + } + + void run() { + test("IncorrectCastOffsetTest$AnnotatedCast1.class", + /* + * generated code: + * 0: iload_0 + * 1: ifeq 8 + * 4: aload_1 + * 5: goto 9 + * 8: aload_2 + * 9: checkcast #13 // class java/lang/String + * 12: areturn + */ + List.of( + "RuntimeVisibleTypeAnnotations:", + "0: #24(): CAST, offset=9, type_index=0", + "IncorrectCastOffsetTest$TypeUse" + ) + ); + test("IncorrectCastOffsetTest$AnnotatedCast2.class", + /* + * generated code: + * 0: aload_0 + * 1: checkcast #13 // class java/lang/String + * 4: areturn + */ + List.of( + "RuntimeVisibleTypeAnnotations:", + "0: #23(): CAST, offset=1, type_index=0", + "IncorrectCastOffsetTest$TypeUse" + ) + ); + test("IncorrectCastOffsetTest$AnnotatedCast3.class", + /* + * generated code: + * 0: iload_0 + * 1: ifeq 8 + * 4: aload_1 + * 5: goto 9 + * 8: aload_2 + * 9: checkcast #13 // class java/lang/String + * 12: areturn + */ + List.of( + "RuntimeVisibleTypeAnnotations:", + "0: #24(): CAST, offset=9, type_index=0", + "IncorrectCastOffsetTest$TypeUse", + "1: #25(): CAST, offset=9, type_index=0", + "IncorrectCastOffsetTest$TypeUse2" + ) + ); + test("IncorrectCastOffsetTest$AnnotatedCast4.class", + /* + * generated code: + * 0: aload_0 + * 1: checkcast #13 // class java/lang/CharSequence + * 4: checkcast #15 // class 
java/lang/String + * 7: areturn + */ + List.of( + "RuntimeVisibleTypeAnnotations:", + "0: #25(): CAST, offset=4, type_index=0", + "IncorrectCastOffsetTest$TypeUse", + "1: #26(): CAST, offset=1, type_index=0", + "IncorrectCastOffsetTest$TypeUse2" + ) + ); + } + + void test(String clazz, List expectedOutput) { + Path pathToClass = Paths.get(ToolBox.testClasses, clazz); + String javapOut = new JavapTask(tb) + .options("-v", "-p") + .classes(pathToClass.toString()) + .run() + .getOutput(Task.OutputKind.DIRECT); + + for (String expected : expectedOutput) { + if (!javapOut.contains(expected)) { + throw new AssertionError("unexpected output"); + } + } + } + +} diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/TypeAnnotationsOnTypes.java b/test/langtools/tools/javac/annotations/typeAnnotations/TypeAnnotationsOnTypes.java new file mode 100644 index 00000000000..5ae6b17a24c --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/TypeAnnotationsOnTypes.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8374020 + * @summary Verify types are set back to the AST. + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @compile TypeAnnotationsOnTypes.java + * @run main TypeAnnotationsOnTypes + */ + +import com.sun.source.tree.VariableTree; +import com.sun.source.util.TaskEvent; +import com.sun.source.util.TaskListener; +import com.sun.source.util.TreePath; +import com.sun.source.util.TreePathScanner; +import com.sun.source.util.Trees; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import javax.lang.model.type.TypeKind; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.type.UnionType; +import toolbox.JavacTask; +import toolbox.ToolBox; + +public class TypeAnnotationsOnTypes { + + public static void main(String... 
args) throws Exception { + new TypeAnnotationsOnTypes().run(); + } + + ToolBox tb = new ToolBox(); + + void run() throws Exception { + typeAnnotationInConstantExpressionFieldInit(Paths.get(".")); + } + + void typeAnnotationInConstantExpressionFieldInit(Path base) throws Exception { + Path src = base.resolve("src"); + Path classes = base.resolve("classes"); + tb.writeJavaFiles(src, + """ + import java.lang.annotation.ElementType; + import java.util.List; + import java.lang.annotation.Target; + + class Test { + + void f() { + @TA List l1; + @TA String[] l2; + @TA TypeVar l3; + try { + } catch (@TA IllegalStateException | NullPointerException | @TA IllegalArgumentException ex) {} + } + + @Target(ElementType.TYPE_USE) + @interface TA {} + } + """); + Files.createDirectories(classes); + List actual = new ArrayList<>(); + new JavacTask(tb) + .options("-d", classes.toString()) + .files(tb.findJavaFiles(src)) + .callback(task -> { + task.addTaskListener(new TaskListener() { + @Override + public void finished(TaskEvent e) { + if (e.getKind() != TaskEvent.Kind.ANALYZE) { + return ; + } + Trees trees = Trees.instance(task); + new TreePathScanner() { + @Override + public Void visitVariable(VariableTree node, Void p) { + TreePath typePath = + new TreePath(getCurrentPath(), node.getType()); + actual.add(node.getName() + + ": type on variable: " + + typeToString(trees.getTypeMirror(getCurrentPath())) + + ": type on type: " + + typeToString(trees.getTypeMirror(typePath))); + return super.visitVariable(node, p); + } + }.scan(e.getCompilationUnit(), null); + } + }); + }) + .run() + .writeAll(); + + List expected = List.of( + "l1: type on variable: java.util.@Test.TA List: type on type: java.util.@Test.TA List", + "l2: type on variable: java.lang.@Test.TA String[]: type on type: java.lang.@Test.TA String[]", + "l3: type on variable: @Test.TA TypeVar: type on type: @Test.TA TypeVar", + "ex: type on variable: java.lang.@Test.TA IllegalStateException | java.lang.NullPointerException | 
java.lang.@Test.TA IllegalArgumentException: " + + "type on type: java.lang.@Test.TA IllegalStateException | java.lang.NullPointerException | java.lang.@Test.TA IllegalArgumentException" + ); + + actual.forEach(System.out::println); + if (!expected.equals(actual)) { + throw new AssertionError("Expected: " + expected + ", but got: " + actual); + } + } + + static String typeToString(TypeMirror type) { + if (type != null && type.getKind() == TypeKind.UNION) { + return ((UnionType) type).getAlternatives().stream().map(t -> typeToString(t)).collect(Collectors.joining(" | ")); + } else { + return String.valueOf(type); + } + } +} diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/TypeAnnotationsOnVariables.java b/test/langtools/tools/javac/annotations/typeAnnotations/TypeAnnotationsOnVariables.java new file mode 100644 index 00000000000..e5a1e5650d3 --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/TypeAnnotationsOnVariables.java @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8371155 8379550 + * @summary Verify type annotations on local-like variables are propagated to + * their types at an appropriate time. + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * jdk.jdeps/com.sun.tools.javap + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit TypeAnnotationsOnVariables + */ + +import com.sun.source.tree.LambdaExpressionTree; +import com.sun.source.tree.Tree; +import com.sun.source.tree.VariableTree; +import com.sun.source.util.TaskEvent; +import com.sun.source.util.TaskListener; +import com.sun.source.util.TreePathScanner; +import com.sun.source.util.Trees; +import java.io.IOException; +import java.lang.classfile.Attributes; +import java.lang.classfile.ClassFile; +import java.lang.classfile.ClassModel; +import java.lang.classfile.MethodModel; +import java.lang.classfile.attribute.RuntimeInvisibleTypeAnnotationsAttribute; +import java.lang.classfile.constantpool.ConstantPool; +import java.lang.classfile.constantpool.Utf8Entry; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.lang.model.type.TypeKind; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.type.UnionType; +import static org.junit.jupiter.api.Assertions.*; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import toolbox.JavacTask; +import toolbox.JavapTask; +import toolbox.Task; +import toolbox.ToolBox; + +public class TypeAnnotationsOnVariables { + + private static final 
Pattern CP_REFERENCE = Pattern.compile("#([1-9][0-9]*)"); + final ToolBox tb = new ToolBox(); + Path base; + + @Test + void typeAnnotationInConstantExpressionFieldInit() throws Exception { + Path src = base.resolve("src"); + Path classes = base.resolve("classes"); + tb.writeJavaFiles(src, + """ + import java.lang.annotation.ElementType; + import java.lang.annotation.Target; + import java.util.function.Supplier; + + class Test { + @Target(ElementType.TYPE_USE) + @interface TypeAnno { } + + @TypeAnno Supplier r_f_i = () -> "r_f_i"; + static @TypeAnno Supplier r_f_s = () -> "r_f_s"; + + { + @TypeAnno Supplier r_init_i = () -> "r_init_i"; + } + + static { + @TypeAnno Supplier r_init_s = () -> "r_init_s"; + } + + void m() { + @TypeAnno Supplier r_m_i = () -> "r_m_i"; + } + + static void g() { + @TypeAnno Supplier r_g_s = () -> "r_g_s"; + } + + void h() { + t_cr(() -> "t_cr"); + } + + void i() { + t_no_cr((@TypeAnno Supplier)() -> "t_no_cr"); + } + + void j() { + t_no_cr((java.io.Serializable & @TypeAnno Supplier)() -> "t_no_cr"); + } + + void k() throws Throwable { + try (@TypeAnno AutoCloseable ac = () -> {}) {} + } + + void l() { + try { + } catch (@TypeAnno Exception e1) {} + } + + void n() { + try { + } catch (@TypeAnno final Exception e2) {} + } + + void o() { + try { + } catch (@TypeAnno IllegalStateException | @TypeAnno NullPointerException | IllegalArgumentException e3) {} + } + + void t_cr(@TypeAnno Supplier r_p) { } + void t_no_cr(@TypeAnno Supplier r_p) { } + } + """); + Files.createDirectories(classes); + List actual = new ArrayList<>(); + new JavacTask(tb) + .options("-d", classes.toString()) + .files(tb.findJavaFiles(src)) + .callback(task -> { + task.addTaskListener(new TaskListener() { + @Override + public void finished(TaskEvent e) { + if (e.getKind() != TaskEvent.Kind.ANALYZE) { + return ; + } + Trees trees = Trees.instance(task); + new TreePathScanner() { + @Override + public Void visitVariable(VariableTree node, Void p) { + actual.add(node.getName() 
+ ": " + typeToString(trees.getTypeMirror(getCurrentPath()))); + return super.visitVariable(node, p); + } + @Override + public Void visitLambdaExpression(LambdaExpressionTree node, Void p) { + actual.add(treeToString(node)+ ": " + typeToString(trees.getTypeMirror(getCurrentPath()))); + return super.visitLambdaExpression(node, p); + } + }.scan(e.getCompilationUnit(), null); + } + }); + }) + .run() + .writeAll(); + + List expected = List.of( + "r_f_i: java.util.function.@Test.TypeAnno Supplier", + "()->\"r_f_i\": java.util.function.@Test.TypeAnno Supplier", + "r_f_s: java.util.function.@Test.TypeAnno Supplier", + "()->\"r_f_s\": java.util.function.@Test.TypeAnno Supplier", + "r_init_i: java.util.function.@Test.TypeAnno Supplier", + "()->\"r_init_i\": java.util.function.@Test.TypeAnno Supplier", + "r_init_s: java.util.function.@Test.TypeAnno Supplier", + "()->\"r_init_s\": java.util.function.@Test.TypeAnno Supplier", + "r_m_i: java.util.function.@Test.TypeAnno Supplier", + "()->\"r_m_i\": java.util.function.@Test.TypeAnno Supplier", + "r_g_s: java.util.function.@Test.TypeAnno Supplier", + "()->\"r_g_s\": java.util.function.@Test.TypeAnno Supplier", + "()->\"t_cr\": java.util.function.@Test.TypeAnno Supplier", + "()->\"t_no_cr\": java.util.function.@Test.TypeAnno Supplier", + "()->\"t_no_cr\": java.lang.Object&java.io.Serializable&java.util.function.@Test.TypeAnno Supplier", + "ac: java.lang.@Test.TypeAnno AutoCloseable", + "()->{ }: java.lang.@Test.TypeAnno AutoCloseable", + "e1: java.lang.@Test.TypeAnno Exception", + "e2: java.lang.@Test.TypeAnno Exception", + "e3: java.lang.@Test.TypeAnno IllegalStateException | java.lang.@Test.TypeAnno NullPointerException | java.lang.IllegalArgumentException", + "r_p: java.util.function.@Test.TypeAnno Supplier", + "r_p: java.util.function.@Test.TypeAnno Supplier" + ); + + actual.forEach(System.out::println); + if (!expected.equals(actual)) { + throw new AssertionError("Expected: " + expected + ", but got: " + actual); + } + } + + 
static String typeToString(TypeMirror type) { + if (type != null && type.getKind() == TypeKind.UNION) { + return ((UnionType) type).getAlternatives().stream().map(t -> typeToString(t)).collect(Collectors.joining(" | ")); + } else { + return String.valueOf(type); + } + } + + static String treeToString(Tree tree) { + return String.valueOf(tree).replaceAll("\\R", " "); + } + + @Test + void properPathForLocalVarsInLambdas() throws Exception { + Path src = base.resolve("src"); + Path classes = base.resolve("classes"); + tb.writeJavaFiles(src, + """ + import java.lang.annotation.ElementType; + import java.lang.annotation.Target; + import java.util.function.Supplier; + + class Test { + @Target(ElementType.TYPE_USE) + @interface TypeAnno { } + + void o() { + Runnable r = () -> { + @TypeAnno long test1 = 0; + while (true) { + @TypeAnno long test2 = 0; + System.err.println(test2); + try (@TypeAnno AutoCloseable ac = null) { + System.err.println(ac); + } catch (@TypeAnno Exception e1) { + System.err.println(e1); + } + try { + "".length(); + } catch (@TypeAnno final Exception e2) { + System.err.println(e2); + } + try { + "".length(); + } catch (@TypeAnno IllegalStateException | @TypeAnno NullPointerException | IllegalArgumentException e3) { + System.err.println(e3); + } + Runnable r2 = () -> { + @TypeAnno long test3 = 0; + while (true) { + @TypeAnno long test4 = 0; + System.err.println(test4); + } + }; + Object o = null; + if (o instanceof @TypeAnno String s) { + System.err.println(s); + } + } + }; + } + void lambdaInClass() { + class C { + Runnable r = () -> { + @TypeAnno long test1 = 0; + System.err.println(test1); + }; + } + } + void classInLambda() { + Runnable r = () -> { + class C { + void method() { + @TypeAnno long test1 = 0; + System.err.println(test1); + } + } + }; + } + } + """); + Files.createDirectories(classes); + new JavacTask(tb) + .options("-d", classes.toString()) + .files(tb.findJavaFiles(src)) + .run() + .writeAll(); + + Path testClass = 
classes.resolve("Test.class"); + TestClassDesc testClassDesc = TestClassDesc.create(testClass); + MethodModel oMethod = singletonValue(testClassDesc.name2Method().get("o")); + var oTypeAnnos = getAnnotations(oMethod); + assertFalse(oTypeAnnos.isPresent(), () -> oTypeAnnos.toString()); + + checkTypeAnnotations(testClassDesc, + "lambda$o$0", + " 0: LTest$TypeAnno;(): LOCAL_VARIABLE, {start_pc=2, length=151, index=0}", + " Test$TypeAnno", + " 1: LTest$TypeAnno;(): LOCAL_VARIABLE, {start_pc=4, length=146, index=2}", + " Test$TypeAnno", + " 2: LTest$TypeAnno;(): RESOURCE_VARIABLE, {start_pc=14, length=52, index=4}", + " Test$TypeAnno", + " 3: LTest$TypeAnno;(): EXCEPTION_PARAMETER, exception_index=2", + " Test$TypeAnno", + " 4: LTest$TypeAnno;(): EXCEPTION_PARAMETER, exception_index=3", + " Test$TypeAnno", + " 5: LTest$TypeAnno;(): EXCEPTION_PARAMETER, exception_index=4", + " Test$TypeAnno", + " 6: LTest$TypeAnno;(): EXCEPTION_PARAMETER, exception_index=5", + " Test$TypeAnno", + " 7: LTest$TypeAnno;(): LOCAL_VARIABLE, {start_pc=142, length=8, index=6}", + " Test$TypeAnno"); + + checkTypeAnnotations(testClassDesc, + "lambda$o$1", + " 0: LTest$TypeAnno;(): LOCAL_VARIABLE, {start_pc=2, length=12, index=0}", + " Test$TypeAnno", + " 1: LTest$TypeAnno;(): LOCAL_VARIABLE, {start_pc=4, length=7, index=2}", + " Test$TypeAnno"); + + checkTypeAnnotations(testClassDesc, + "lambda$classInLambda$0"); + + checkTypeAnnotations(TestClassDesc.create(classes.resolve("Test$1C.class")), + "lambda$new$0", + " 0: LTest$TypeAnno;(): LOCAL_VARIABLE, {start_pc=2, length=8, index=0}", + " Test$TypeAnno"); + } + + private void checkTypeAnnotations(TestClassDesc testClassDesc, + String lambdaMethodName, + String... 
expectedEntries) throws IOException { + MethodModel lambdaMethod = singletonValue(testClassDesc.name2Method().get(lambdaMethodName)); + var lambdaTypeAnnos = getAnnotations(lambdaMethod); + if (expectedEntries.length == 0) { + assertFalse(lambdaTypeAnnos.isPresent(), () -> lambdaTypeAnnos.toString()); + } else { + assertTrue(lambdaTypeAnnos.isPresent(), () -> lambdaTypeAnnos.toString()); + assertEquals(expectedEntries.length / 2, + lambdaTypeAnnos.orElseThrow().annotations().size(), + () -> lambdaTypeAnnos.orElseThrow().annotations().toString()); + + checkJavapOutput(testClassDesc, + List.of(expectedEntries)); + } + } + + private T singletonValue(List values) { + assertEquals(1, values.size()); + return values.get(0); + } + + private Optional getAnnotations(MethodModel m) { + return m.findAttribute(Attributes.code()) + .orElseThrow() + .findAttribute(Attributes.runtimeInvisibleTypeAnnotations()); + } + + void checkJavapOutput(TestClassDesc testClassDesc, List expectedOutput) throws IOException { + String javapOut = new JavapTask(tb) + .options("-v", "-p") + .classes(testClassDesc.pathToClass().toString()) + .run() + .getOutput(Task.OutputKind.DIRECT); + + StringBuilder expandedJavapOutBuilder = new StringBuilder(); + Matcher m = CP_REFERENCE.matcher(javapOut); + + while (m.find()) { + String cpIndexText = m.group(1); + int cpIndex = Integer.parseInt(cpIndexText); + m.appendReplacement(expandedJavapOutBuilder, Matcher.quoteReplacement(testClassDesc.cpIndex2Name().getOrDefault(cpIndex, cpIndexText))); + } + + m.appendTail(expandedJavapOutBuilder); + + String expandedJavapOut = expandedJavapOutBuilder.toString(); + + for (String expected : expectedOutput) { + if (!expandedJavapOut.contains(expected)) { + System.err.println(expandedJavapOut); + throw new AssertionError("unexpected output"); + } + } + } + + record TestClassDesc(Path pathToClass, + Map> name2Method, + Map cpIndex2Name) { + public static TestClassDesc create(Path pathToClass) throws IOException{ + 
ClassModel model = ClassFile.of().parse(pathToClass); + Map> name2Method = + model.methods() + .stream() + .collect(Collectors.groupingBy(m -> m.methodName().stringValue())); + ConstantPool cp = model.constantPool(); + int cpSize = cp.size(); + Map cpIndex2Name = new HashMap<>(); + + for (int i = 1; i < cpSize; i++) { + if (cp.entryByIndex(i) instanceof Utf8Entry string) { + cpIndex2Name.put(i, string.stringValue()); + } + } + + return new TestClassDesc(pathToClass, name2Method, cpIndex2Name); + } + } + + @BeforeEach + void setUp(TestInfo thisTest) { + base = Path.of(thisTest.getTestMethod().orElseThrow().getName()); + } +} diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/classfile/BridgeShouldHaveNoInteriorAnnotationsTest.java b/test/langtools/tools/javac/annotations/typeAnnotations/classfile/BridgeShouldHaveNoInteriorAnnotationsTest.java index 0ec1a790d72..3426bfedd30 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/classfile/BridgeShouldHaveNoInteriorAnnotationsTest.java +++ b/test/langtools/tools/javac/annotations/typeAnnotations/classfile/BridgeShouldHaveNoInteriorAnnotationsTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,7 +71,7 @@ public class BridgeShouldHaveNoInteriorAnnotationsTest }; - // Expected output can't be directly encoded into NestedLambdasCastedTest !!! + // Expected output can't be directly encoded into BridgeShouldHaveNoInteriorAnnotationsTest !!! 
static class OutputExpectedOnceHolder { public String[] outputs = { "0: #120(): CAST, offset=1, type_index=0, location=[TYPE_ARGUMENT(0)]", diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/classfile/LocalClassesTest.java b/test/langtools/tools/javac/annotations/typeAnnotations/classfile/LocalClassesTest.java new file mode 100644 index 00000000000..66b55c8d209 --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/classfile/LocalClassesTest.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8371817 + * @summary Check for type annotating types that refer to local classes read + * from classfiles + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit LocalClassesTest + */ + +import com.sun.source.tree.ClassTree; +import com.sun.source.util.TaskEvent; +import com.sun.source.util.TaskListener; +import com.sun.source.util.TreePathScanner; +import com.sun.source.util.Trees; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.Element; +import javax.lang.model.element.TypeElement; +import javax.lang.model.type.DeclaredType; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.util.ElementFilter; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import toolbox.JavacTask; +import toolbox.ToolBox; + +public class LocalClassesTest { + + ToolBox tb = new ToolBox(); + Path base; + + @Test + void test() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + + Map local2enclosing = new HashMap<>(); + new JavacTask(tb) + .options("-d", classes.toString()) + .sources(""" + import java.lang.annotation.ElementType; + import java.lang.annotation.Target; + + public class Test { + public static void m1() { + class Local1 { + @Nullable Local1 l; + } + } + public void m2() { + class Local2 { + @Nullable Local2 l; + } + } + } + + @Target({ElementType.TYPE_USE}) + @interface Nullable {} + """) + .callback(task -> { 
+ task.addTaskListener(new TaskListener() { + @Override + public void finished(TaskEvent e) { + if (e.getKind() == TaskEvent.Kind.ANALYZE) { + Trees trees = Trees.instance(task); + new TreePathScanner<>() { + @Override + public Object visitClass(ClassTree node, Object p) { + if (node.getSimpleName().toString().startsWith("Local")) { + Element el = trees.getElement(getCurrentPath()); + TypeMirror type = trees.getTypeMirror(getCurrentPath()); + local2enclosing.put(el.getSimpleName().toString(), ((DeclaredType) type).getEnclosingType().toString()); + } + return super.visitClass(node, p); + } + }.scan(e.getCompilationUnit(), null); + } + } + }); + }) + .run() + .writeAll(); + + Path classes2 = base.resolve("classes2"); + Files.createDirectories(classes2); + + ProcessorImpl p = new ProcessorImpl(); + new JavacTask(tb) + .options("-cp", classes.toString(), "-d", classes2.toString()) + .processors(p) + .classes("Test$1Local1", "Test$1Local2") + .run() + .writeAll(); + + Assertions.assertEquals(local2enclosing.get("Local1"), p.local2enclosing.get("Local1")); + Assertions.assertEquals(local2enclosing.get("Local2"), p.local2enclosing.get("Local2")); + } + + @SupportedAnnotationTypes("*") + private static class ProcessorImpl extends AbstractProcessor { + private Map local2enclosing = new HashMap<>(); + + @Override + public boolean process(Set annotations, RoundEnvironment roundEnv) { + for (TypeElement te : ElementFilter.typesIn(roundEnv.getRootElements())) { + if (te.getSimpleName().toString().startsWith("Local")) { + local2enclosing.put(te.getSimpleName().toString(), ((DeclaredType) te.asType()).getEnclosingType().toString()); + } + } + return false; + } + + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latestSupported(); + } + } + + @BeforeEach + public void setup(TestInfo info) { + base = Paths.get(".") + .resolve(info.getTestMethod() + .orElseThrow() + .getName()); + } +} diff --git 
a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.java b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.java index d35351b6f40..5031a74bc5a 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.java +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.java @@ -1,6 +1,6 @@ /* * @test /nodynamiccopyright/ - * @bug 8026564 8043226 8334055 + * @bug 8026564 8043226 8334055 8179187 * @summary The parts of a fully-qualified type can't be annotated. * @author Werner Dietl * @compile/fail/ref=CantAnnotatePackages.out -XDrawDiagnostics CantAnnotatePackages.java diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.out b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.out index b91d65828b9..6e2b9cb93b0 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.out +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotatePackages.out @@ -1,5 +1,5 @@ -CantAnnotatePackages.java:16:14: compiler.err.cant.resolve.location: kindname.class, java, , , (compiler.misc.location: kindname.class, CantAnnotatePackages, null) -CantAnnotatePackages.java:17:9: compiler.err.cant.resolve.location: kindname.class, lang, , , (compiler.misc.location: kindname.package, java, null) -CantAnnotatePackages.java:18:14: compiler.err.cant.resolve.location: kindname.class, lang, , , (compiler.misc.location: kindname.package, java, null) CantAnnotatePackages.java:14:18: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), java.lang, @TA java.lang.Object +CantAnnotatePackages.java:16:14: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), java.lang, @TA java.lang.Object +CantAnnotatePackages.java:17:9: compiler.err.type.annotation.inadmissible: 
(compiler.misc.type.annotation.1: @TA), java.lang, @TA java.lang.Object +CantAnnotatePackages.java:18:14: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), java.lang, @TA java.lang.Object 4 errors diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.java b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.java index 4bdd791909c..427c1fef3a8 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.java +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.java @@ -1,6 +1,6 @@ /* * @test /nodynamiccopyright/ - * @bug 8006733 8006775 8043226 8334055 + * @bug 8006733 8006775 8043226 8334055 8179187 * @summary Ensure behavior for nested types is correct. * @author Werner Dietl * @compile/fail/ref=CantAnnotateScoping.out -XDrawDiagnostics CantAnnotateScoping.java diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.out b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.out index ade5333a446..2ae736ad315 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.out +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/CantAnnotateScoping.out @@ -1,12 +1,13 @@ -CantAnnotateScoping.java:63:9: compiler.err.cant.resolve.location: kindname.class, lang, , , (compiler.misc.location: kindname.package, java, null) -CantAnnotateScoping.java:68:9: compiler.err.cant.resolve.location: kindname.class, XXX, , , (compiler.misc.location: kindname.package, java, null) -CantAnnotateScoping.java:71:9: compiler.err.cant.resolve.location: kindname.class, lang, , , (compiler.misc.location: kindname.package, java, null) +CantAnnotateScoping.java:68:18: compiler.err.doesnt.exist: java.XXX CantAnnotateScoping.java:38:14: compiler.err.type.annotation.inadmissible: 
(compiler.misc.type.annotation.1: @TA), Test.Outer, @TA Test.Outer.SInner CantAnnotateScoping.java:51:18: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), java.lang, @TA java.lang.Object CantAnnotateScoping.java:60:37: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation: @TA,@TA2), java.lang, @DTA @TA @TA2 java.lang.Object CantAnnotateScoping.java:40:14: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), Test.Outer, @TA Test.Outer.SInner +CantAnnotateScoping.java:63:11: compiler.err.annotation.type.not.applicable.to.type: DA +CantAnnotateScoping.java:68:11: compiler.err.annotation.type.not.applicable.to.type: DA +CantAnnotateScoping.java:71:9: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), java.lang, @TA java.lang.Object CantAnnotateScoping.java:44:34: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation: @TA,@TA2), Test.Outer, @TA @TA2 Test.Outer.SInner CantAnnotateScoping.java:44:25: compiler.err.annotation.type.not.applicable.to.type: DA CantAnnotateScoping.java:48:38: compiler.err.type.annotation.inadmissible: (compiler.misc.type.annotation.1: @TA), Test.Outer, @TA Test.Outer.SInner CantAnnotateScoping.java:48:34: compiler.err.annotation.type.not.applicable.to.type: DA -11 errors +12 errors diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/MethodArguments.java b/test/langtools/tools/javac/annotations/typeAnnotations/failures/MethodArguments.java new file mode 100644 index 00000000000..e539721855b --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/MethodArguments.java @@ -0,0 +1,47 @@ +/* + * @test /nodynamiccopyright/ + * @summary Omit type-use annotations from diagnostics + * @compile/fail/ref=MethodArguments.out -XDrawDiagnostics MethodArguments.java p/A.java p/B.java + */ + +import java.util.List; +import p.A; +import p.B; + +public final class 
MethodArguments { + public static void main(String[] args) { + // error non-static.cant.be.ref: + // non-static ... cannot be referenced from a static context + B.one("bar"); + + B b = new B(); + + // error ref.ambiguous: + // reference to ... is ambiguous + // ... + // both ... and ... match + b.one(null); + + // error report.access: + // ... has private access in ... + b.two("foo"); + // ... has protected access in ... + b.three("foo"); + + // error not.def.public.cant.access: + // ... is not public in ... cannot be accessed from outside package + b.four("foo"); + } + + void five(@A String s) { + } + + void five(@A String s) { + } + + void six(List<@A String> s) { + } + + void six(List<@A String> s) { + } +} diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/MethodArguments.out b/test/langtools/tools/javac/annotations/typeAnnotations/failures/MethodArguments.out new file mode 100644 index 00000000000..b4ecee21403 --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/MethodArguments.out @@ -0,0 +1,8 @@ +MethodArguments.java:39:8: compiler.err.already.defined: kindname.method, five(java.lang.String), kindname.class, MethodArguments +MethodArguments.java:45:8: compiler.err.already.defined: kindname.method, six(java.util.List), kindname.class, MethodArguments +MethodArguments.java:15:6: compiler.err.non-static.cant.be.ref: kindname.method, one(java.lang.String) +MethodArguments.java:23:6: compiler.err.ref.ambiguous: one, kindname.method, one(java.lang.String), p.B, kindname.method, one(java.lang.Integer), p.B +MethodArguments.java:27:6: compiler.err.report.access: two(java.lang.String), private, p.B +MethodArguments.java:29:6: compiler.err.report.access: three(java.lang.String), protected, p.B +MethodArguments.java:33:6: compiler.err.not.def.public.cant.access: four(java.lang.String), p.B +7 errors diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/p/A.java 
b/test/langtools/tools/javac/annotations/typeAnnotations/failures/p/A.java new file mode 100644 index 00000000000..9e3bb15dab5 --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/p/A.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2026, Google LLC. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package p; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +@Target(ElementType.TYPE_USE) +public @interface A {} diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/failures/p/B.java b/test/langtools/tools/javac/annotations/typeAnnotations/failures/p/B.java new file mode 100644 index 00000000000..4dfe16b312d --- /dev/null +++ b/test/langtools/tools/javac/annotations/typeAnnotations/failures/p/B.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2026, Google LLC. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package p; + +public class B { + public void one(@A String s) { + } + + public void one(@A Integer i) { + } + + private void two(@A String s) { + } + + protected void three(@A String s) { + } + + void four(@A String s) { + } +} diff --git a/test/langtools/tools/javac/classreader/Annotations.java b/test/langtools/tools/javac/classreader/Annotations.java new file mode 100644 index 00000000000..30d0ae48b9a --- /dev/null +++ b/test/langtools/tools/javac/classreader/Annotations.java @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8305250 + * @summary Check behavior w.r.t. annotations missing from the classpath. + * @library /tools/lib + * @modules jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit Annotations + */ + +import java.nio.file.Files; +import java.util.List; +import java.nio.file.Path; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import toolbox.ToolBox; +import toolbox.JavacTask; +import toolbox.Task; + +public class Annotations { + private ToolBox tb = new ToolBox(); + private Path base; + + @Test + public void testParameterModifiersNotVisible() throws Exception { + Path ann = base.resolve("annotations"); + Path annSrc = ann.resolve("src"); + Path annClasses = ann.resolve("classes"); + + tb.writeJavaFiles(annSrc, + """ + package annotations; + public @interface Ann { + public E e(); + } + """, + """ + package annotations; + public enum E { + A; + } + """); + + Files.createDirectories(annClasses); + + new JavacTask(tb) + .outdir(annClasses) + .files(tb.findJavaFiles(annSrc)) + .run() + .writeAll(); + + Path lib = base.resolve("lib"); + Path libSrc = lib.resolve("src"); + Path libClasses = lib.resolve("classes"); + + tb.writeJavaFiles(libSrc, + """ + package lib; + import annotations.*; + @Ann(e = E.A) + public class Lib { + } + """); + + 
Files.createDirectories(libClasses); + + new JavacTask(tb) + .outdir(libClasses) + .classpath(annClasses) + .files(tb.findJavaFiles(libSrc)) + .run() + .writeAll(); + + Path test = base.resolve("test"); + Path testSrc = test.resolve("src"); + Path testClasses = test.resolve("classes"); + + tb.writeJavaFiles(testSrc, + """ + package test; + import lib.*; + public class Test { + Lib l; + } + """); + + Files.createDirectories(testClasses); + + //annotations available, no errors/warnings: + new JavacTask(tb) + .outdir(testClasses) + .classpath(libClasses, annClasses) + .options("-Werror", "-Xlint:classfile") + .files(tb.findJavaFiles(testSrc)) + .run() + .writeAll(); + + //annotation and enum missing, no errors/warnings: + new JavacTask(tb) + .outdir(testClasses) + .classpath(libClasses) + .options("-Werror", "-Xlint:classfile") + .files(tb.findJavaFiles(testSrc)) + .run() + .writeAll(); + + tb.writeJavaFiles(annSrc, + """ + package annotations; + public enum E { + B; + } + """); + + Files.createDirectories(annClasses); + + new JavacTask(tb) + .outdir(annClasses) + .files(tb.findJavaFiles(annSrc)) + .run() + .writeAll(); + + List log; + + //enum missing the enum constant recorded in the classfile, report warning: + log = new JavacTask(tb) + .outdir(testClasses) + .classpath(libClasses, annClasses) + .options("-Xlint:classfile", "-XDrawDiagnostics") + .files(tb.findJavaFiles(testSrc)) + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + tb.checkEqual(log, + List.of("Lib.class:-:-: compiler.warn.unknown.enum.constant: E.class, annotations.E, A", + "1 warning")); + + //enum is missing, but the annotation is not, report warning: + Files.delete(annClasses.resolve("annotations").resolve("E.class")); + + log = new JavacTask(tb) + .outdir(testClasses) + .classpath(libClasses, annClasses) + .options("-Xlint:classfile", "-XDrawDiagnostics") + .files(tb.findJavaFiles(testSrc)) + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + 
tb.checkEqual(log, + List.of("Lib.class:-:-: compiler.warn.unknown.enum.constant.reason: Ann.class, annotations.E, A, (compiler.misc.class.file.not.found: annotations.E)", + "1 warning")); + + tb.writeJavaFiles(annSrc, + """ + package annotations; + public @interface Ann { + public E nue(); + } + """, + """ + package annotations; + public enum E { + A; + } + """); + + new JavacTask(tb) + .outdir(annClasses) + .files(tb.findJavaFiles(annSrc)) + .run() + .writeAll(); + + //enum is OK and the annotation exists, but the annotation is missing the required attribute method, report warning: + log = new JavacTask(tb) + .outdir(testClasses) + .classpath(libClasses, annClasses) + .options("-Xlint:classfile", "-XDrawDiagnostics") + .files(tb.findJavaFiles(testSrc)) + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + tb.checkEqual(log, + List.of("Lib.class:-:-: compiler.warn.annotation.method.not.found: annotations.Ann, e", + "1 warning")); + } + + @BeforeEach + public void setup(TestInfo ti) { + base = Path.of(".").resolve(ti.getTestMethod().orElseThrow().getName()); + } +} diff --git a/test/langtools/tools/javac/diags/CheckExamples.java b/test/langtools/tools/javac/diags/CheckExamples.java index 502f5fa8b88..7a188614080 100644 --- a/test/langtools/tools/javac/diags/CheckExamples.java +++ b/test/langtools/tools/javac/diags/CheckExamples.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,12 +45,22 @@ import java.util.*; /** * Check invariants for a set of examples. + * + * READ THIS IF THIS TEST FAILS AFTER ADDING A NEW KEY TO 'compiler.properties': + * The 'examples' subdirectory contains a number of examples which provoke + * the reporting of most of the compiler message keys. 
+ * * -- each example should exactly declare the keys that will be generated when * it is run. + * -- this is done by the "// key:"-comment in each fine. * -- together, the examples should cover the set of resource keys in the * compiler.properties bundle. A list of exceptions may be given in the * not-yet.txt file. Entries on the not-yet.txt list should not be * covered by examples. + * -- some keys are only reported by the compiler when specific options are + * supplied. For the purposes of this test, this can be specified by a + * comment e.g. like this: "// options: -Xlint:empty" + * * When new keys are added to the resource bundle, it is strongly recommended * that corresponding new examples be added here, if at all practical, instead * of simply and lazily being added to the not-yet.txt list. diff --git a/test/langtools/tools/javac/doctree/ReferenceTest.java b/test/langtools/tools/javac/doctree/ReferenceTest.java index 540cb9a6621..83a10ee4d20 100644 --- a/test/langtools/tools/javac/doctree/ReferenceTest.java +++ b/test/langtools/tools/javac/doctree/ReferenceTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* * @test - * @bug 7021614 8278373 8164094 8371248 + * @bug 7021614 8278373 8164094 8371248 8284315 * @summary extend com.sun.source API to support parsing javadoc comments * @summary check references in at-see and {at-link} tags * @modules jdk.compiler @@ -56,6 +56,7 @@ import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.QualifiedNameable; import javax.lang.model.element.TypeElement; import javax.lang.model.type.DeclaredType; +import javax.lang.model.type.TypeKind; import javax.lang.model.type.TypeMirror; import javax.tools.Diagnostic.Kind; @@ -84,11 +85,14 @@ import javax.tools.Diagnostic.Kind; * {@link #trees Field} * {@link #getSupportedSourceVersion Method} * {@link #init(ProcessingEnvironment Method} - * {@link double Class} + * {@link double type-only:double} + * {@link int type-only:int} + * {@link void type-only:void} * {@link double.NAN Bad} * {@link double#NAN Bad} * {@link double#double Bad} * {@link java.base/double Bad} + * {@link jdk.javadoc/double Bad} * * {@link List Interface} * {@link List.add Bad} @@ -100,15 +104,20 @@ import javax.tools.Diagnostic.Kind; * {@link Map.Entry#getKey Method} * {@link Map.Entry#setValue(Object) Method} * - * {@link java.base/java.util.List Bad} + * {@link java.lang.String[] type-only:array} + * {@link java.lang.String[].length Bad} + * {@link java.lang.String[]#length type-only:int} + * {@link java.lang.String[]#length() Bad} + * + * {@link java.base/java.util.List Interface} * {@link java.base/java.util.List.add Bad} - * {@link java.base/java.util.List#add Bad} - * {@link java.base/java.util.List#add(Object) Bad} - * {@link java.base/java.util.Map.Entry Bad} - * {@link java.base/java.util.Map.Entry Bad} + * {@link java.base/java.util.List#add Method} + * {@link java.base/java.util.List#add(Object) Method} + * {@link java.base/java.util.Map.Entry Interface} + * {@link 
java.base/java.util.Map.Entry Interface} * {@link java.base/java.util.Map.Entry.getKey Bad} - * {@link java.base/java.util.Map.Entry#getKey Bad} - * {@link java.base/java.util.Map.Entry#setValue(Object) Bad} + * {@link java.base/java.util.Map.Entry#getKey Method} + * {@link java.base/java.util.Map.Entry#setValue(Object) Method} * * @see java.lang Package * @see java.lang.ERROR Bad @@ -127,6 +136,14 @@ import javax.tools.Diagnostic.Kind; * @see java.lang.String#ERROR Bad * @see java.lang.String#equals(Object) Method * + * @see java.lang.String[] type-only:array + * @see java.lang.String[].length Bad + * @see java.lang.String[]#length type-only:int + * @see java.lang.String[]#length() Bad + * + * @see jdk.javadoc/jdk.javadoc.doclet.Doclet Interface + * @see jdk.compiler/jdk.javadoc.doclet.Doclet Bad + * * @see AbstractProcessor Class * * @see List#add(Object) Method @@ -200,15 +217,23 @@ public class ReferenceTest extends AbstractProcessor { String sig = tree.getSignature(); Element found = trees.getElement(new DocTreePath(getCurrentPath(), tree)); + TypeMirror type = trees.getType(new DocTreePath(getCurrentPath(), tree)); + if (found == null) { System.err.println(sig + " NOT FOUND"); } else { System.err.println(sig + " found " + found.getKind() + " " + found); + if (type == null) { + error(tree, "Did not find type for element " + found); + } else if (!erasure(type).equals(erasure(found.asType()))) { + error(tree, "Type " + erasure(type) + " does not match element " + erasure(found.asType())); + } } String expect = "UNKNOWN"; - if (label.size() > 0 && label.get(0) instanceof TextTree) - expect = ((TextTree) label.get(0)).getBody(); + if (!label.isEmpty() && label.getFirst() instanceof TextTree) { + expect = ((TextTree) label.getFirst()).getBody(); + } if (expect.startsWith("signature:")) { expect = expect.substring("signature:".length()); @@ -216,12 +241,25 @@ public class ReferenceTest extends AbstractProcessor { String signature = found.getKind().name() + ":" + 
elementSignature(found); if (!expect.equalsIgnoreCase(signature)) { - error(tree, "Unexpected value found: " + signature +", expected: " + expect); + error(tree, "Unexpected value found: " + signature + ", expected: " + expect); + } + } else if (expect.startsWith("type-only:")) { + expect = expect.substring("type-only:".length()); + if (found != null) { + error(tree, "Found element for type-only reference: " + found); + } + if (type == null) { + error(tree, "Found no type, expected: " + expect); + } else if (!expect.equalsIgnoreCase(type.getKind().name())) { + error(tree, "Found unexpected type: " + type + ", expected: " + expect); } } else { if (!expect.equalsIgnoreCase(found == null ? "bad" : found.getKind().name())) { error(tree, "Unexpected value found: " + found +", expected: " + expect); } + if (expect.equalsIgnoreCase("bad") && type != null) { + error(tree, "Found unexpected type: " + type + ", expected none"); + } } } @@ -252,6 +290,12 @@ public class ReferenceTest extends AbstractProcessor { default -> throw new AssertionError("Unhandled type kind: " + type.getKind()); }; } + + TypeMirror erasure(TypeMirror type) { + return type.getKind() == TypeKind.DECLARED + ? 
processingEnv.getTypeUtils().erasure(type) + : type; + } } /** @@ -317,11 +361,17 @@ class ReferenceTestExtras { * @see #X Field * @see #X() Method * @see #m Method + * @see X Type_Parameter + * @see Y Type_Parameter + * @see X#wait Method + * @see X#wait() Method + * @see Y#getSupportedSourceVersion Method + * @see Y#init(ProcessingEnvironment) Method * @see Inner#X Bad * @see Inner#X() Bad * @see Inner#m Bad */ - interface Inner {} + interface Inner {} } diff --git a/test/langtools/tools/javac/implicitCompile/APImplicitClassesWarnings.java b/test/langtools/tools/javac/implicitCompile/APImplicitClassesWarnings.java new file mode 100644 index 00000000000..8c1e356217c --- /dev/null +++ b/test/langtools/tools/javac/implicitCompile/APImplicitClassesWarnings.java @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8378740 8378950 + * @summary Verify warnings are properly suppressed for the combination of + * annotation processing and implicit compilation + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit APImplicitClassesWarnings + */ + + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.Set; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.TypeElement; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import toolbox.JavacTask; +import toolbox.Task; +import toolbox.ToolBox; + +public class APImplicitClassesWarnings { + + final ToolBox tb = new ToolBox(); + Path base; + + @Test + public void testCorrectSource() throws Exception { + Path src = base.resolve("src"); + Path classes = base.resolve("classes"); + tb.writeJavaFiles(src, + """ + package test; + + @Deprecated(forRemoval=true) + public class Depr { + } + """, + """ + package test; + public class Use { + Implicit implicit; + Depr depr; + } + """, + """ + package test; + public interface Implicit {} + """); + Files.createDirectories(classes); + + List log = new JavacTask(tb) + .options("-d", classes.toString(), + "-XDrawDiagnostics", + "-implicit:class", + "-sourcepath", src.toString()) + .files(src.resolve("test").resolve("Depr.java"), + src.resolve("test").resolve("Use.java")) + .processors(new ProcessorImpl()) + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + List expected = List.of( + "Use.java:4:5: compiler.warn.has.been.deprecated.for.removal: test.Depr, test", + "1 warning" + ); + + 
tb.checkEqual(expected, log); + } + + @Test + public void testCorrectSuppress() throws Exception { + Path src = base.resolve("src"); + Path classes = base.resolve("classes"); + tb.writeJavaFiles(src, + //note the added @SuppressWarnings("removal"): + """ + package test; + + @Deprecated(forRemoval=true) + public class Depr { + } + """, + """ + package test; + public class Use { + Implicit implicit; + @SuppressWarnings("removal") + Depr depr; + } + """, + """ + package test; + public interface Implicit {} + """); + Files.createDirectories(classes); + + new JavacTask(tb) + .options("-d", classes.toString(), + "-Werror", + "-implicit:class", + "-sourcepath", src.toString()) + .files(src.resolve("test").resolve("Depr.java"), + src.resolve("test").resolve("Use.java")) + .processors(new ProcessorImpl()) + .run() + .writeAll(); + } + + @Test + public void testCorrectImport() throws Exception { + Path src = base.resolve("src"); + Path classes = base.resolve("classes"); + tb.writeJavaFiles(src, + """ + package test; + + @Deprecated(forRemoval=true) + public class Depr { + public static class Nested {} + } + """, + """ + package test; + import test.Depr.Nested; + public class Use { + Implicit implicit; + Nested nest; + } + """, + """ + package test; + public interface Implicit {} + """); + Files.createDirectories(classes); + + List log = new JavacTask(tb) + .options("-d", classes.toString(), + "-XDrawDiagnostics", + "-implicit:class", + "-sourcepath", src.toString()) + .files(src.resolve("test").resolve("Depr.java"), + src.resolve("test").resolve("Use.java")) + .processors(new ProcessorImpl()) + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + List expected = List.of( + "Use.java:2:12: compiler.warn.has.been.deprecated.for.removal: test.Depr, test", + "1 warning" + ); + + tb.checkEqual(expected, log); + } + + @SupportedAnnotationTypes("*") + private static class ProcessorImpl extends AbstractProcessor { + @Override + public boolean process(Set annotations, 
RoundEnvironment roundEnv) { + return false; + } + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + } + + @BeforeEach + public void setUp(TestInfo info) { + base = Paths.get(".") + .resolve(info.getTestMethod().orElseThrow().getName()); + } +} diff --git a/test/langtools/tools/javac/launcher/SourceLauncherTest.java b/test/langtools/tools/javac/launcher/SourceLauncherTest.java index c3c8fb19a1b..d0cf11a0327 100644 --- a/test/langtools/tools/javac/launcher/SourceLauncherTest.java +++ b/test/langtools/tools/javac/launcher/SourceLauncherTest.java @@ -670,6 +670,7 @@ public class SourceLauncherTest extends TestRunner { tb.writeJavaFiles(base, "public class Main { public static void main(String... args) {}}"); String log = new JavaTask(tb) .vmOptions("--source", "21") + .includeStandardOptions(false) // Do not inherit --enable-preview .className(base.resolve("Main.java").toString()) .run(Task.Expect.SUCCESS) .getOutput(Task.OutputKind.STDERR); diff --git a/test/langtools/tools/javac/lexer/AsciiSubCharTest.java b/test/langtools/tools/javac/lexer/AsciiSubCharTest.java new file mode 100644 index 00000000000..0c96b744d72 --- /dev/null +++ b/test/langtools/tools/javac/lexer/AsciiSubCharTest.java @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8371873 + * @summary Check for proper handling of trailing ASCII SUB character + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit AsciiSubCharTest + */ + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +import toolbox.JavacTask; +import toolbox.Task; +import toolbox.ToolBox; + +public class AsciiSubCharTest { + + ToolBox tb = new ToolBox(); + Path base; + + @Test + public void testTrailingAsciiSubIsIgnored() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + new JavacTask(tb) + .options("-d", classes.toString()) + .sources(""" + public class Test { + void main(String... args) { IO.println("\u001A"); } + } + \u001A""") + .run() + .writeAll(); + } + + @Test + public void testMultipleTrailingAsciiSubAreReported() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + List out = new JavacTask(tb) + .options("-d", classes.toString(), "-XDrawDiagnostics", "-nowarn") + .sources(""" + public class Test { + void main(String... 
args) { IO.println("\u001A"); } + } + \u001A\u001A""") + .run(Task.Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + tb.checkEqual(out, List.of( + "Test.java:4:1: compiler.err.illegal.char: \\u001a", + "Test.java:4:2: compiler.err.premature.eof", + "2 errors")); + } + + @Test + public void test8371873() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + List out = new JavacTask(tb) + .options("-d", classes.toString(), "-XDrawDiagnostics", "-nowarn") + .sources(""" + public class Test { + void main(String... args) { IO.println("\u001A"); } + } + \u001A\u0001""") + .run(Task.Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + tb.checkEqual(out, List.of( + "Test.java:4:1: compiler.err.illegal.char: \\u001a", + "Test.java:4:2: compiler.err.illegal.char: \\u0001", + "Test.java:4:3: compiler.err.premature.eof", + "3 errors")); + } + + @BeforeEach + public void setUp(TestInfo info) { + base = Paths.get(".") + .resolve(info.getTestMethod() + .orElseThrow() + .getName()); + } +} diff --git a/test/langtools/tools/javac/modules/IncubatingTest.java b/test/langtools/tools/javac/modules/IncubatingTest.java index 68f615abc04..6ff1a0a29c8 100644 --- a/test/langtools/tools/javac/modules/IncubatingTest.java +++ b/test/langtools/tools/javac/modules/IncubatingTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* * @test - * @bug 8171177 8187591 + * @bug 8171177 8187591 8378950 * @summary Verify that ModuleResolution attribute flags are honored. 
* @library /tools/lib * @modules jdk.compiler/com.sun.tools.javac.api @@ -46,10 +46,15 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; - +import java.util.Set; import java.lang.classfile.*; import java.lang.classfile.attribute.ModuleResolutionAttribute; import java.lang.classfile.constantpool.*; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.TypeElement; import toolbox.JavacTask; import toolbox.Task; import toolbox.Task.Expect; @@ -242,6 +247,29 @@ public class IncubatingTest extends ModuleTestBase { .outdir(testModuleClasses) .files(findJavaFiles(testModuleSrc)) .run(Expect.SUCCESS); + + //test with annotation processing + log = new JavacTask(tb) + .options("--module-path", classes.toString(), + "-XDrawDiagnostics", + "-Werror") + .outdir(testModuleClasses) + .files(findJavaFiles(testModuleSrc)) + .processors(new ProcessorImpl()) + .run(Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + expected = Arrays.asList( + "- compiler.warn.incubating.modules: jdk.i", + "- compiler.err.warnings.and.werror", + "1 error", + "1 warning" + ); + + if (!expected.equals(log)) { + throw new AssertionError("Unexpected output: " + log); + } } private void copyJavaBase(Path targetDir) throws IOException { @@ -270,4 +298,16 @@ public class IncubatingTest extends ModuleTestBase { out.write(newBytes); } } + + @SupportedAnnotationTypes("*") + private static class ProcessorImpl extends AbstractProcessor { + @Override + public boolean process(Set annotations, RoundEnvironment roundEnv) { + return false; + } + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + } } diff --git a/test/langtools/tools/javac/patterns/BreakAndLoops.java 
b/test/langtools/tools/javac/patterns/BreakAndLoops.java index 31ad8908f53..777d5ae4f42 100644 --- a/test/langtools/tools/javac/patterns/BreakAndLoops.java +++ b/test/langtools/tools/javac/patterns/BreakAndLoops.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -237,4 +237,4 @@ public class BreakAndLoops extends ComboInstance { return code; } } -} \ No newline at end of file +} diff --git a/test/langtools/tools/javac/preview/PreviewAutoSuppress.java b/test/langtools/tools/javac/preview/PreviewAutoSuppress.java index 058ccdf0a2a..f2096aff648 100644 --- a/test/langtools/tools/javac/preview/PreviewAutoSuppress.java +++ b/test/langtools/tools/javac/preview/PreviewAutoSuppress.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,8 @@ import java.nio.file.Paths; import java.util.List; public class PreviewAutoSuppress extends TestRunner { + // Major version number (e.g. '27'). 
+ private static final String FEATURE_VERSION = String.valueOf(Runtime.version().feature()); protected ToolBox tb; @@ -83,7 +85,7 @@ public class PreviewAutoSuppress extends TestRunner { List log = new JavacTask(tb, Task.Mode.CMDLINE) .outdir(classes) .options("--enable-preview", - "-source", String.valueOf(Runtime.version().feature()), + "-source", FEATURE_VERSION, "-Xlint:preview", "-XDforcePreview", "-XDrawDiagnostics") @@ -182,7 +184,7 @@ public class PreviewAutoSuppress extends TestRunner { "--add-exports", "java.base/preview.api=ALL-UNNAMED", "--enable-preview", "-Xlint:preview", - "-source", String.valueOf(Runtime.version().feature()), + "-source", FEATURE_VERSION, "-XDrawDiagnostics") .files(tb.findJavaFiles(testSrc)) .run() diff --git a/test/langtools/tools/javac/processing/model/util/types/TestAsElement.java b/test/langtools/tools/javac/processing/model/util/types/TestAsElement.java index 6ac714752c3..cc82c5adddd 100644 --- a/test/langtools/tools/javac/processing/model/util/types/TestAsElement.java +++ b/test/langtools/tools/javac/processing/model/util/types/TestAsElement.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* * @test - * @bug 8300857 + * @bug 8300857 8379156 * @summary Test Types.asElement in cases specified to return null * @library /tools/javac/lib * @build JavacTestingAbstractProcessor TestAsElement @@ -55,18 +55,23 @@ public class TestAsElement extends JavacTestingAbstractProcessor { } private void testNullCases() { - // Test all primitive types + // Test all primitive types and arrays of primitive types for (TypeKind typeKind : TypeKind.values()) { if (typeKind.isPrimitive() ) { - expectNullAsElement(typeUtils.getPrimitiveType(typeKind)); + var primType = typeUtils.getPrimitiveType(typeKind); + expectNullAsElement(primType); + expectNullAsElement(typeUtils.getArrayType(primType)); } } expectNullAsElement(typeUtils.getNoType(TypeKind.VOID)); expectNullAsElement(typeUtils.getNoType(TypeKind.NONE)); expectNullAsElement(typeUtils.getNullType()); - Element objectElement = eltUtils.getTypeElement("java.lang.Object"); - expectNullAsElement(typeUtils.getWildcardType(objectElement.asType(), null)); + Element objectElement = eltUtils.getTypeElement("java.lang.Object"); + TypeMirror objectType = objectElement.asType(); + expectNullAsElement(typeUtils.getWildcardType(objectType, null)); + // check Object[] + expectNullAsElement(typeUtils.getArrayType(objectType)); // Loop over the ExecutableTypes for Object's methods for(var methodElt : ElementFilter.methodsIn(objectElement.getEnclosedElements())) { diff --git a/test/langtools/tools/javac/processing/rounds/OverwriteBetweenCompilations_2.out b/test/langtools/tools/javac/processing/rounds/OverwriteBetweenCompilations_2.out index 826e2b4bcb0..431fd3d9079 100644 --- a/test/langtools/tools/javac/processing/rounds/OverwriteBetweenCompilations_2.out +++ b/test/langtools/tools/javac/processing/rounds/OverwriteBetweenCompilations_2.out @@ -53,5 +53,3 @@ public abstract class GeneratedClass extends java.ut public void test(long a); } -- 
compiler.note.deprecated.filename: OverwriteBetweenCompilationsSource.java -- compiler.note.deprecated.recompile diff --git a/test/langtools/tools/javac/processing/warnings/TestParserWarnings.java b/test/langtools/tools/javac/processing/warnings/TestParserWarnings.java new file mode 100644 index 00000000000..b611e7a8655 --- /dev/null +++ b/test/langtools/tools/javac/processing/warnings/TestParserWarnings.java @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8381654 + * @summary AP interference with tokenizer warnings + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit ${test.main.class} + */ + +import java.io.IOException; +import java.io.Writer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.Filer; +import javax.annotation.processing.Messager; +import javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.TypeElement; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import toolbox.JavacTask; +import toolbox.Task; +import toolbox.ToolBox; + +public class TestParserWarnings { + + static boolean[] apOptions() { + return new boolean[] {false, true}; + } + + final ToolBox tb = new ToolBox(); + Path base, src, classes; + + @ParameterizedTest @MethodSource("apOptions") + public void testPreviewWarning(boolean useProcessor) throws Exception { + tb.writeJavaFiles(src, """ + public record MyRec() {} + """); + + JavacTask task = new JavacTask(tb) + .options("--enable-preview", + "-source", Integer.toString(Runtime.version().feature()), + "-XDforcePreview", + "-XDrawDiagnostics") + .files(tb.findJavaFiles(src)) + .outdir(classes); + if (useProcessor) { + task.processors(new ProcessorImpl()); + } + List log = task + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + List expected = List.of( + "- compiler.note.preview.filename: MyRec.java, DEFAULT", + "- 
compiler.note.preview.recompile" + ); + + tb.checkEqual(expected, log); + } + + @ParameterizedTest @MethodSource("apOptions") + public void testTextBlockWarning(boolean useProcessor) throws Exception { + tb.writeJavaFiles(src, """ + class TextBlockWhitespace { + String m() { + return ""\" + \\u0009\\u0009\\u0009\\u0009tab indentation + \\u0020\\u0020\\u0020\\u0020space indentation and trailing space\\u0020 + \\u0020\\u0020\\u0020\\u0020""\"; + } + } + """); + + JavacTask task = new JavacTask(tb) + .options("-Xlint:text-blocks", + "-XDrawDiagnostics") + .files(tb.findJavaFiles(src)) + .outdir(classes); + if (useProcessor) { + task.processors(new ProcessorImpl()); + } + List log = task + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + List expected = List.of( + "TextBlockWhitespace.java:3:16: compiler.warn.inconsistent.white.space.indentation", + "TextBlockWhitespace.java:3:16: compiler.warn.trailing.white.space.will.be.removed", + "2 warnings" + ); + + tb.checkEqual(expected, log); + } + + @Test + public void testAPGeneratedSource() throws Exception { + tb.writeJavaFiles(src, """ + import java.lang.annotation.ElementType; + import java.lang.annotation.Target; + + @A + class Test {} + + @Target(ElementType.TYPE) + @interface A {} + """); + + List log = new JavacTask(tb) + .options("-Xlint:text-blocks", + "-XDrawDiagnostics") + .files(tb.findJavaFiles(src)) + .outdir(classes) + .processors(new ProcessorImpl()) + .run() + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + + List expected = List.of( + "Generated.java:3:16: compiler.warn.inconsistent.white.space.indentation", + "Generated.java:3:16: compiler.warn.trailing.white.space.will.be.removed", + "2 warnings" + ); + + tb.checkEqual(expected, log); + } + + @SupportedAnnotationTypes("*") + private static class ProcessorImpl extends AbstractProcessor { + private boolean done = false; + private Filer filer; + private Messager msgr; + + @Override + public void init(ProcessingEnvironment env) { 
+ filer = env.getFiler(); + msgr = env.getMessager(); + } + + @Override + public boolean process(Set annotations, RoundEnvironment roundEnv) { + if (!done && !annotations.isEmpty()) { + try (Writer pw = filer.createSourceFile("Generated").openWriter()) { + pw.write(""" + public class Generated { + String m() { + return ""\" + \\u0009\\u0009\\u0009\\u0009tab indentation + \\u0020\\u0020\\u0020\\u0020space indentation and trailing space\\u0020 + \\u0020\\u0020\\u0020\\u0020""\"; + } + } + """); + pw.flush(); + pw.close(); + done = true; + } catch (IOException ioe) { + msgr.printError(ioe.getMessage()); + return false; + } + return true; + } + return false; + } + + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + } + + @BeforeEach + public void setUp(TestInfo info) throws Exception { + base = Path.of(".").resolve(info.getTestMethod().get().getName()); + if (Files.exists(base)) { + tb.cleanDirectory(base); + } + src = base.resolve("src"); + classes = base.resolve("classes"); + Files.createDirectories(classes); + } +} diff --git a/test/langtools/tools/javac/records/RecordReading.java b/test/langtools/tools/javac/records/RecordReading.java index 6133a04fca9..e57a7f77ceb 100644 --- a/test/langtools/tools/javac/records/RecordReading.java +++ b/test/langtools/tools/javac/records/RecordReading.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ * @run main RecordReading */ - import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; diff --git a/test/langtools/tools/javac/recovery/ClassBlockExits.java b/test/langtools/tools/javac/recovery/ClassBlockExits.java index b1c54b456ae..3ac04cbe991 100644 --- a/test/langtools/tools/javac/recovery/ClassBlockExits.java +++ b/test/langtools/tools/javac/recovery/ClassBlockExits.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -138,4 +138,4 @@ public class ClassBlockExits extends ComboInstance { return code; } } -} \ No newline at end of file +} diff --git a/test/langtools/tools/javac/types/IsFunctionalInterfaceTest.java b/test/langtools/tools/javac/types/IsFunctionalInterfaceTest.java new file mode 100644 index 00000000000..a4fe4e470c7 --- /dev/null +++ b/test/langtools/tools/javac/types/IsFunctionalInterfaceTest.java @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8378906 + * @summary Check that Types.isFunctionalInterface() works for interface methods + * with ACC_PUBLIC, ACC_BRIDGE, ACC_ABSTRACT, ACC_SYNTHETIC flags. + * @library /tools/lib + * @modules + * jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main + * @build toolbox.ToolBox toolbox.JavacTask + * @run junit IsFunctionalInterfaceTest + */ + +import java.lang.classfile.ClassFile; +import java.lang.classfile.MethodElement; +import java.lang.classfile.MethodModel; +import java.lang.reflect.AccessFlag; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +import toolbox.JavacTask; +import toolbox.ToolBox; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import toolbox.Task; + +public class IsFunctionalInterfaceTest { + + Path base; + ToolBox tb = new ToolBox(); + + @Test + void test8378906() throws Exception { + Path classes = base.resolve("classes"); + Files.createDirectories(classes); + new JavacTask(tb) + .options("-d", classes.toString()) + .sources(""" + interface I { + @Deprecated + Object test(); + } + + final class Sub implements I { + public static final Sub INSTANCE = new Sub(); + public Object test() { return null; } + } + + class Util { + public static boolean foo(I a) { return true; } + public static boolean foo(Sub a) { return false; } + } + """) + .run() + .writeAll(); + + Path path = classes.resolve("I.class"); + ClassFile classFile = ClassFile.of(); + byte[] bytes = 
classFile.transformClass(classFile.parse(path), + (classBuilder, classElement) -> { + if (classElement instanceof MethodModel mm + && mm.methodName().equalsString("test")) { + int flags = mm.flags().flagsMask() | AccessFlag.BRIDGE.mask() | AccessFlag.SYNTHETIC.mask(); + classBuilder.withMethod(mm.methodName(), mm.methodType(), flags, (methodBuilder) -> { + mm.attributes().forEach(attr -> { + if (attr instanceof MethodElement me) { + methodBuilder.with(me); + } + }); + }); + } else { + classBuilder.with(classElement); + } + }); + Files.write(path, bytes); + + new JavacTask(tb) + .options("-d", classes.toString(), "-cp", classes.toString()) + .sources(""" + public class Test { + public void main() { Util.foo(Sub.INSTANCE); } + } + """) + .run() + .writeAll(); + + List out1 = new JavacTask(tb) + .options("-d", classes.toString(), "-cp", classes.toString(), "-XDrawDiagnostics", "-nowarn") + .sources(""" + public class Test { + public void main() { t(() -> null); } + private void t(I i) {} + } + """) + .run(Task.Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + tb.checkEqual(out1, List.of( + "Test.java:2:25: compiler.err.cant.apply.symbol: kindname.method, t, I, @27, kindname.class, Test, (compiler.misc.no.conforming.assignment.exists: (compiler.misc.not.a.functional.intf.1: I, (compiler.misc.no.abstracts: kindname.interface, I)))", + "1 error")); + + List out2 = new JavacTask(tb) + .options("-d", classes.toString(), "-cp", classes.toString(), "-XDrawDiagnostics", "-nowarn") + .sources(""" + public class Impl implements I { + } + """) + .run(Task.Expect.FAIL) + .writeAll() + .getOutputLines(Task.OutputKind.DIRECT); + tb.checkEqual(out2, List.of( + "Impl.java:1:8: compiler.err.does.not.override.abstract: Impl, test(), I", + "1 error")); + + new JavacTask(tb) + .options("-d", classes.toString(), "-cp", classes.toString()) + .sources(""" + public class Impl implements I { + public Object test() { return null; } + } + """) + .run() + .writeAll(); + } + 
+ @BeforeEach + public void setUp(TestInfo info) { + base = Paths.get(".") + .resolve(info.getTestMethod() + .orElseThrow() + .getName()); + } +} diff --git a/test/langtools/tools/javap/T4975569.java b/test/langtools/tools/javap/T4975569.java index c43598b41db..ce75ba84586 100644 --- a/test/langtools/tools/javap/T4975569.java +++ b/test/langtools/tools/javap/T4975569.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,4 +105,3 @@ public class T4975569 { protected class Prot { } private class Priv { int i; } } - diff --git a/test/langtools/tools/lib/builder/AbstractBuilder.java b/test/langtools/tools/lib/builder/AbstractBuilder.java index e528fe60792..4717f456d08 100644 --- a/test/langtools/tools/lib/builder/AbstractBuilder.java +++ b/test/langtools/tools/lib/builder/AbstractBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -203,6 +203,13 @@ public abstract class AbstractBuilder { this.modifiers = modifiers; } + boolean isInterface() { + if (modifiers.isEmpty()) { + throw new IllegalStateException("modifiers not initialized"); + } + return modifiers.getLast().endsWith("interface"); + } + @Override public String toString() { OutputWriter ow = new OutputWriter(); diff --git a/test/langtools/tools/lib/builder/ClassBuilder.java b/test/langtools/tools/lib/builder/ClassBuilder.java index feafa77db56..2c57f0e0c13 100644 --- a/test/langtools/tools/lib/builder/ClassBuilder.java +++ b/test/langtools/tools/lib/builder/ClassBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.ListIterator; import java.util.regex.Matcher; @@ -54,6 +55,7 @@ public class ClassBuilder extends AbstractBuilder { private String extendsType; private final List implementsTypes; + private final List permitsTypes; private final List members; private final List inners; private final List nested; @@ -86,6 +88,7 @@ public class ClassBuilder extends AbstractBuilder { } imports = new ArrayList<>(); implementsTypes = new ArrayList<>(); + permitsTypes = new ArrayList<>(); members = new ArrayList<>(); nested = new ArrayList<>(); inners = new ArrayList<>(); @@ -153,7 +156,11 @@ public class ClassBuilder extends AbstractBuilder { * @return this builder. 
*/ public ClassBuilder setExtends(String name) { - extendsType = name; + if (modifiers.isInterface()) { + implementsTypes.add(name); + } else { + extendsType = name; + } return this; } @@ -163,7 +170,17 @@ public class ClassBuilder extends AbstractBuilder { * @return this builder. */ public ClassBuilder addImplements(String... names) { - implementsTypes.addAll(List.of(names)); + implementsTypes.addAll(Arrays.asList(names)); + return this; + } + + /** + * Adds a permits declaration(s). + * @param names the subtypes + * @return this builder + */ + public ClassBuilder addPermits(String... names) { + permitsTypes.addAll(Arrays.asList(names)); return this; } @@ -225,28 +242,25 @@ public class ClassBuilder extends AbstractBuilder { ow.println("// NO_API_COMMENT"); break; } + assert !modifiers.modifiers.isEmpty(); ow.print(modifiers.toString()); ow.print(clsname); if (typeParameter != null) { - ow.print(typeParameter + " "); - } else { - ow.print(" "); + ow.print(typeParameter); } if (extendsType != null && !extendsType.isEmpty()) { - ow.print("extends " + extendsType + " "); + assert !modifiers.isInterface(); + ow.print(" extends " + extendsType); } if (!implementsTypes.isEmpty()) { - ow.print("implements "); - - ListIterator iter = implementsTypes.listIterator(); - while (iter.hasNext()) { - String s = iter.next() ; - ow.print(s); - if (iter.hasNext()) - ow.print(", "); - } + ow.print(modifiers.isInterface() ? 
" extends " : " implements "); + ow.print(String.join(", ", implementsTypes)); } - ow.print("{"); + if (!permitsTypes.isEmpty()) { + ow.print(" permits "); + ow.print(String.join(", ", permitsTypes)); + } + ow.print(" {"); if (!nested.isEmpty()) { ow.println(""); nested.forEach(m -> ow.println(m.toString())); diff --git a/test/langtools/tools/lib/toolbox/ToolBox.java b/test/langtools/tools/lib/toolbox/ToolBox.java index ee217ab2c0c..1cb2fa6d3f9 100644 --- a/test/langtools/tools/lib/toolbox/ToolBox.java +++ b/test/langtools/tools/lib/toolbox/ToolBox.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -666,6 +666,27 @@ public class ToolBox { return Path.of(testJDK, "bin", tool); } + /** + * Finds a file with a path relative to the langtools test root directory. + * + * @param path the desired path from test/langtools + * @return the file, if found + */ + public Path findFromTestRoot(String path) { + Path testSrc = Path.of(System.getProperty("test.src", ".")); + + for (Path d = testSrc; d != null; d = d.getParent()) { + if (Files.exists(d.resolve("TEST.ROOT"))) { + Path file = d.resolve(path); + if (Files.exists(file)) { + return file; + } + } + } + + return null; + } + /** * Returns a string representing the contents of an {@code Iterable} as a list. 
* diff --git a/test/lib-test/TEST.ROOT b/test/lib-test/TEST.ROOT index f23d38c1e66..33c9a9c2a43 100644 --- a/test/lib-test/TEST.ROOT +++ b/test/lib-test/TEST.ROOT @@ -31,6 +31,9 @@ keys=randomness # Minimum jtreg version requiredVersion=8.2.1+1 +# Prevent TestNG-based tests under this root, use @run junit actions instead +disallowedActions=testng + # Allow querying of various System properties in @requires clauses requires.extraPropDefns = ../jtreg-ext/requires/VMProps.java requires.extraPropDefns.bootlibs = ../lib/jdk/test/whitebox diff --git a/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java b/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java index 8047b81c1e6..801874f38c2 100644 --- a/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java +++ b/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java @@ -50,7 +50,7 @@ public class TestMutuallyExclusivePlatformPredicates { OS("isAix", "isLinux", "isOSX", "isWindows"), VM_TYPE("isClient", "isServer", "isMinimal", "isZero", "isEmbedded"), MODE("isInt", "isMixed", "isComp"), - IGNORED("isEmulatedClient", "isDebugBuild", "isFastDebugBuild", "isMusl", + IGNORED("isDebugBuild", "isFastDebugBuild", "isMusl", "isStatic", "isSlowDebugBuild", "hasSA", "isRoot", "isTieredSupported", "areCustomLoadersSupportedForCDS", "isDefaultCDSArchiveSupported", "isHardenedOSX", "hasOSXPlistEntries", "isOracleLinux7", "isOnWayland"); diff --git a/test/lib-test/jdk/test/lib/jittester/MethodTemplateTest.java b/test/lib-test/jdk/test/lib/jittester/MethodTemplateTest.java index 3cee24b4684..69751bc281c 100644 --- a/test/lib-test/jdk/test/lib/jittester/MethodTemplateTest.java +++ b/test/lib-test/jdk/test/lib/jittester/MethodTemplateTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,8 +25,8 @@ package jdk.test.lib.jittester; import java.lang.reflect.Executable; -import org.testng.annotations.Test; -import static org.testng.Assert.*; +import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.*; /* * @test @@ -35,7 +35,7 @@ import static org.testng.Assert.*; * @library /test/lib * /test/hotspot/jtreg/testlibrary/jittester/src * - * @run testng jdk.test.lib.jittester.MethodTemplateTest + * @run junit jdk.test.lib.jittester.MethodTemplateTest */ public class MethodTemplateTest { diff --git a/test/lib/RedefineClassHelper.java b/test/lib/RedefineClassHelper.java index ce27fb33f44..064778b3a2a 100644 --- a/test/lib/RedefineClassHelper.java +++ b/test/lib/RedefineClassHelper.java @@ -107,7 +107,7 @@ public class RedefineClassHelper { * Main method to be invoked before test to create the redefineagent.jar */ public static void main(String[] args) throws Exception { - String manifest = "Premain-Class: RedefineClassHelper\nCan-Redefine-Classes: true\n"; + String manifest = "Premain-Class: RedefineClassHelper\nCan-Redefine-Classes: true\nCan-Retransform-Classes: true\n"; ClassFileInstaller.writeJar("redefineagent.jar", ClassFileInstaller.Manifest.fromString(manifest), "RedefineClassHelper"); } } diff --git a/test/lib/jdk/test/lib/Convert.java b/test/lib/jdk/test/lib/Convert.java index 7ed1112c94d..2738e04c2f0 100644 --- a/test/lib/jdk/test/lib/Convert.java +++ b/test/lib/jdk/test/lib/Convert.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,26 +41,6 @@ public class Convert { return result; } - /* - * Convert a hexadecimal string to the corresponding little-ending number - * as a BigInteger. The clearHighBit argument determines whether the most - * significant bit of the highest byte should be set to 0 in the result. - */ - public static - BigInteger hexStringToBigInteger(boolean clearHighBit, String str) { - BigInteger result = BigInteger.ZERO; - for (int i = 0; i < str.length() / 2; i++) { - int curVal = Character.digit(str.charAt(2 * i), 16); - curVal <<= 4; - curVal += Character.digit(str.charAt(2 * i + 1), 16); - if (clearHighBit && i == str.length() / 2 - 1) { - curVal &= 0x7F; - } - result = result.add(BigInteger.valueOf(curVal).shiftLeft(8 * i)); - } - return result; - } - private static EdECPoint byteArrayToEdPoint(byte[] arr) { byte msb = arr[arr.length - 1]; boolean xOdd = (msb & 0x80) != 0; diff --git a/test/lib/jdk/test/lib/Platform.java b/test/lib/jdk/test/lib/Platform.java index 170e53930d8..892de2338e3 100644 --- a/test/lib/jdk/test/lib/Platform.java +++ b/test/lib/jdk/test/lib/Platform.java @@ -75,10 +75,6 @@ public class Platform { return vmInfo.contains("static"); } - public static boolean isEmulatedClient() { - return vmInfo.contains(" emulated-client"); - } - public static boolean isTieredSupported() { return (compiler != null) && compiler.contains("Tiered Compilers"); } diff --git a/test/lib/jdk/test/lib/SA/SATestUtils.java b/test/lib/jdk/test/lib/SA/SATestUtils.java index 50f5d71f1f1..754ef4c40dd 100644 --- a/test/lib/jdk/test/lib/SA/SATestUtils.java +++ b/test/lib/jdk/test/lib/SA/SATestUtils.java @@ -333,7 +333,7 @@ public class SATestUtils { .get(); String dir = buildID.substring(0, 2); String file = buildID.substring(2); - debuginfoPath = Path.of("/usr/lib/debug/.build_id", dir, file + ".debug"); + debuginfoPath = Path.of("/usr/lib/debug/.build-id", dir, file + ".debug"); exists = 
Files.exists(debuginfoPath); } catch (NoSuchElementException _) { // return null if vDSO not found. diff --git a/test/lib/jdk/test/lib/Utils.java b/test/lib/jdk/test/lib/Utils.java index 2f46ed87340..95b7a117b2c 100644 --- a/test/lib/jdk/test/lib/Utils.java +++ b/test/lib/jdk/test/lib/Utils.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,6 +105,11 @@ public final class Utils { */ public static final String TEST_SRC = System.getProperty("test.src", "").trim(); + /** + * Returns the value of 'test.src.path' system property. + */ + public static final String TEST_SRC_PATH = System.getProperty("test.src.path", "").trim(); + /** * Returns the value of 'test.root' system property. */ diff --git a/test/lib/jdk/test/lib/cds/CDSAppTester.java b/test/lib/jdk/test/lib/cds/CDSAppTester.java index 18356dd8aa9..3eac8a35a37 100644 --- a/test/lib/jdk/test/lib/cds/CDSAppTester.java +++ b/test/lib/jdk/test/lib/cds/CDSAppTester.java @@ -480,11 +480,20 @@ abstract public class CDSAppTester { // See JEP 483 public void runAOTWorkflow(String... args) throws Exception { this.workflow = Workflow.AOT; - boolean oneStepTraining = true; // by default use onestep trainning + + // By default use twostep training -- tests are much easier to write this way, as + // the stdout/stderr of the training run is clearly separated from the assembly phase. + // + // Many older test cases written before JEP 514 were not aware of one step treaining + // and may not check the stdout/stderr correctly. 
+ boolean oneStepTraining = false; if (System.getProperty("CDSAppTester.two.step.training") != null) { oneStepTraining = false; } + if (System.getProperty("CDSAppTester.one.step.training") != null) { + oneStepTraining = true; + } if (args.length > 1) { // Tests such as test/hotspot/jtreg/runtime/cds/appcds/aotCache/SpecialCacheNames.java diff --git a/test/lib/jdk/test/lib/cds/CDSTestUtils.java b/test/lib/jdk/test/lib/cds/CDSTestUtils.java index 13ac2e4e97a..59e4a1bbbde 100644 --- a/test/lib/jdk/test/lib/cds/CDSTestUtils.java +++ b/test/lib/jdk/test/lib/cds/CDSTestUtils.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -324,7 +324,7 @@ public class CDSTestUtils { public static void checkCommonExecExceptions(OutputAnalyzer output, Exception e) throws Exception { if (output.getStdout().contains("https://bugreport.java.com/bugreport/crash.jsp")) { - throw new RuntimeException("Hotspot crashed"); + throw new RuntimeException(getCrashMessage(output.getStdout())); } if (output.getStdout().contains("TEST FAILED")) { throw new RuntimeException("Test Failed"); @@ -696,11 +696,16 @@ public class CDSTestUtils { System.out.println("[STDOUT]\n" + output.getStdout()); if (output.getExitValue() != 0 && output.getStdout().contains("A fatal error has been detected")) { - throw new RuntimeException("Hotspot crashed"); + throw new RuntimeException(getCrashMessage(output.getStdout())); } return output; } + static String getCrashMessage(String stdOut) { + int start = stdOut.indexOf("# A fatal error has been detected by the Java Runtime Environment:"); + int end = stdOut.indexOf(".log", start) + 4; + return stdOut.substring(start, end); + } private static void writeFile(File file, String content) throws Exception { 
FileOutputStream fos = new FileOutputStream(file); diff --git a/test/lib/jdk/test/lib/cli/CommandLineOptionTest.java b/test/lib/jdk/test/lib/cli/CommandLineOptionTest.java index a9ae57bca71..48738d4a0da 100644 --- a/test/lib/jdk/test/lib/cli/CommandLineOptionTest.java +++ b/test/lib/jdk/test/lib/cli/CommandLineOptionTest.java @@ -201,10 +201,6 @@ public abstract class CommandLineOptionTest { if (!Platform.isStatic()) { finalOptions.add(CommandLineOptionTest.getVMTypeOption()); } - String extraFlagForEmulated = CommandLineOptionTest.getVMTypeOptionForEmulated(); - if (extraFlagForEmulated != null) { - finalOptions.add(extraFlagForEmulated); - } Collections.addAll(finalOptions, options); CommandLineOptionTest.verifyJVMStartup(expectedMessages, @@ -401,10 +397,6 @@ public abstract class CommandLineOptionTest { if (!Platform.isStatic()) { finalOptions.add(CommandLineOptionTest.getVMTypeOption()); } - String extraFlagForEmulated = CommandLineOptionTest.getVMTypeOptionForEmulated(); - if (extraFlagForEmulated != null) { - finalOptions.add(extraFlagForEmulated); - } Collections.addAll(finalOptions, additionalVMOpts); CommandLineOptionTest.verifyOptionValue(optionName, expectedValue, @@ -512,18 +504,6 @@ public abstract class CommandLineOptionTest { throw new RuntimeException("Unknown VM mode."); } - /** - * @return addtional VMoptions(Emulated related) required to start a new VM with the same type as current. 
- */ - private static String getVMTypeOptionForEmulated() { - if (Platform.isServer() && !Platform.isEmulatedClient()) { - return "-XX:-NeverActAsServerClassMachine"; - } else if (Platform.isEmulatedClient()) { - return "-XX:+NeverActAsServerClassMachine"; - } - return null; - } - private final BooleanSupplier predicate; /** diff --git a/test/lib/jdk/test/lib/jfr/EventNames.java b/test/lib/jdk/test/lib/jfr/EventNames.java index 8b0113f75f4..06ee62a2f7c 100644 --- a/test/lib/jdk/test/lib/jfr/EventNames.java +++ b/test/lib/jdk/test/lib/jfr/EventNames.java @@ -115,6 +115,7 @@ public class EventNames { public static final String ShenandoahHeapRegionInformation = PREFIX + "ShenandoahHeapRegionInformation"; public static final String ShenandoahHeapRegionStateChange = PREFIX + "ShenandoahHeapRegionStateChange"; public static final String ShenandoahEvacuationInformation = PREFIX + "ShenandoahEvacuationInformation"; + public static final String ShenandoahPromotionInformation = PREFIX + "ShenandoahPromotionInformation"; public static final String TenuringDistribution = PREFIX + "TenuringDistribution"; public static final String GarbageCollection = PREFIX + "GarbageCollection"; public static final String ParallelOldGarbageCollection = PREFIX + "ParallelOldGarbageCollection"; diff --git a/test/lib/jdk/test/lib/jfr/Events.java b/test/lib/jdk/test/lib/jfr/Events.java index 8bbf22ca63a..03e1e39cfe3 100644 --- a/test/lib/jdk/test/lib/jfr/Events.java +++ b/test/lib/jdk/test/lib/jfr/Events.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ import java.io.IOException; import java.nio.file.Path; import java.time.Duration; import java.time.Instant; +import java.util.Comparator; import java.util.List; import jdk.jfr.AnnotationElement; @@ -280,6 +281,20 @@ public class Events { return RecordingFile.readAllEvents(makeCopy(recording)); } + /** + * Creates a list of events from a recording, ordered by the end time. + * + * @param recording recording, not {@code null} + * @return a list, not null + * @throws IOException if an event set could not be created due to I/O + * errors. + */ + public static List fromRecordingOrdered(Recording recording) throws IOException { + List events = fromRecording(recording); + events.sort(Comparator.comparing(RecordedEvent::getEndTime)); + return events; + } + public static RecordingFile copyTo(Recording r) throws IOException { return new RecordingFile(makeCopy(r)); } diff --git a/test/lib/jdk/test/lib/json/JSONValue.java b/test/lib/jdk/test/lib/json/JSONValue.java index f89d13b3bba..3ac6441b4c0 100644 --- a/test/lib/jdk/test/lib/json/JSONValue.java +++ b/test/lib/jdk/test/lib/json/JSONValue.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,46 +25,55 @@ package jdk.test.lib.json; import java.math.BigInteger; import java.util.ArrayList; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.Optional; -public interface JSONValue { +public sealed interface JSONValue + permits JSONValue.JSONObject, JSONValue.JSONArray, JSONValue.JSONString, + JSONValue.JSONNumber, JSONValue.JSONBoolean, JSONValue.JSONNull { public final class JSONObject implements JSONValue { - private final Map value; + private final Map members; - public JSONObject(Map value) { - this.value = value; + private JSONObject(Map members) { + this.members = Map.copyOf(members); } @Override - public JSONObject asObject() { - return this; - } - - public JSONValue get(String k) { - return value.get(k); + public JSONValue get(String name) { + JSONValue v = members.get(name); + if (v == null) { + throw new NoSuchElementException("member " + name + " does not exist"); + } + return v; } @Override - public int size() { - return value.size(); + public Optional getOrAbsent(String name) { + return Optional.ofNullable(members.get(name)); + } + + @Override + public Map members() { + return members; } @Override public String toString() { var builder = new StringBuilder(); builder.append("{"); - for (var key : value.keySet()) { + for (String key : members.keySet()) { builder.append("\""); builder.append(key); builder.append("\":"); - builder.append(value.get(key).toString()); + builder.append(members.get(key).toString()); builder.append(","); } - var end = builder.length() - 1; + int end = builder.length() - 1; if (builder.charAt(end) == ',') { builder.deleteCharAt(end); } @@ -72,13 +81,17 @@ public interface JSONValue { builder.append("}"); return builder.toString(); } + + public static JSONObject of(Map members) { + return new JSONObject(members); 
+ } } public final class JSONString implements JSONValue { private final String value; - public JSONString(String value) { - this.value = value; + private JSONString(String value) { + this.value = Objects.requireNonNull(value); } @Override @@ -88,9 +101,6 @@ public interface JSONValue { @Override public String toString() { - if (value == null) { - return "null"; - } var builder = new StringBuilder(); builder.append("\""); @@ -131,26 +141,17 @@ public interface JSONValue { builder.append("\""); return builder.toString(); } + + public static JSONString of(String value) { + return new JSONString(value); + } } - public final class JSONArray implements JSONValue, Iterable { - private final List values; + public final class JSONArray implements JSONValue { + private final List elements; - public JSONArray(List array) { - this.values = array; - } - - @Override - public JSONArray asArray() { - return this; - } - - public JSONValue get(int i) { - return values.get(i); - } - - public int size() { - return values.size(); + JSONArray(List elements) { + this.elements = List.copyOf(elements); } @Override @@ -158,9 +159,10 @@ public interface JSONValue { var builder = new StringBuilder(); builder.append("["); - for (var i = 0; i < size(); i++) { - builder.append(get(i).toString()); - if (i != (size() - 1)) { + int size = elements.size(); + for (int i = 0; i < size; i++) { + builder.append(elements.get(i).toString()); + if (i != (size - 1)) { builder.append(","); } } @@ -169,8 +171,106 @@ public interface JSONValue { } @Override - public Iterator iterator() { - return values.iterator(); + public List elements() { + return elements; + } + + @Override + public JSONValue element(int index) { + return elements.get(index); + } + + public static JSONArray of(List elements) { + return new JSONArray(elements); + } + } + + public final class JSONNumber implements JSONValue { + private final String value; + + private JSONNumber(String value) { + this.value = Objects.requireNonNull(value); + 
} + + @Override + public int asInt() { + return Integer.parseInt(value); + } + + @Override + public long asLong() { + return Long.parseLong(value); + } + + @Override + public double asDouble() { + return Double.parseDouble(value); + } + + @Override + public String toString() { + return value; + } + + public static JSONNumber of(String value) { + return new JSONNumber(value); + } + + public static JSONNumber of(int value) { + return of(String.valueOf(value)); + } + + public static JSONNumber of(long value) { + return of(String.valueOf(value)); + } + + public static JSONNumber of(double value) { + return of(String.valueOf(value)); + } + } + + public final class JSONBoolean implements JSONValue { + private static JSONBoolean TRUE = new JSONBoolean(true); + private static JSONBoolean FALSE = new JSONBoolean(false); + + private final boolean value; + + private JSONBoolean(boolean value) { + this.value = value; + } + + @Override + public boolean asBoolean() { + return value; + } + + @Override + public String toString() { + return String.valueOf(value); + } + + public static JSONBoolean of(boolean value) { + return value ? 
TRUE : FALSE; + } + } + + public final class JSONNull implements JSONValue { + private static JSONNull NULL = new JSONNull(); + + private JSONNull() {} + + @Override + public Optional valueOrNull() { + return Optional.empty(); + } + + @Override + public String toString() { + return "null"; + } + + public static JSONNull of() { + return NULL; } } @@ -181,8 +281,8 @@ public interface JSONValue { JSONParser() { } - private IllegalStateException failure(String message) { - return new IllegalStateException(String.format("[%d]: %s : %s", pos, message, input)); + private IllegalArgumentException failure(String message) { + return new IllegalArgumentException(String.format("[%d]: %s : %s", pos, message, input)); } private char current() { @@ -220,13 +320,13 @@ public interface JSONValue { } } - private JSONString parseBoolean() { + private JSONBoolean parseBoolean() { if (current() == 't') { expect('r'); expect('u'); expect('e'); advance(); - return new JSONString("true"); + return JSONBoolean.of(true); } if (current() == 'f') { @@ -235,7 +335,7 @@ public interface JSONValue { expect('s'); expect('e'); advance(); - return new JSONString("false"); + return JSONBoolean.of(false); } throw failure("a boolean can only be 'true' or 'false'"); } @@ -319,11 +419,10 @@ public interface JSONValue { var value = builder.toString(); if (isInteger) { new BigInteger(value); - return new JSONString(value); } else { Double.parseDouble(value); - return new JSONString(value); } + return JSONNumber.of(value); } private JSONString parseString() { @@ -374,7 +473,7 @@ public interface JSONValue { } advance(); // step beyond closing " - return new JSONString(builder.toString()); + return JSONString.of(builder.toString()); } private JSONArray parseArray() { @@ -397,15 +496,15 @@ public interface JSONValue { } advance(); // step beyond closing ']' - return new JSONArray(list); + return JSONArray.of(list); } - public JSONString parseNull() { + public JSONNull parseNull() { expect('u'); expect('l'); 
expect('l'); advance(); - return new JSONString(null); + return JSONNull.of(); } public JSONObject parseObject() { @@ -438,7 +537,7 @@ public interface JSONValue { } advance(); // step beyond '}' - return new JSONObject(map); + return JSONObject.of(map); } private boolean isDigit(char c) { @@ -526,27 +625,51 @@ public interface JSONValue { } } - public static JSONValue parse(String s) { + static JSONValue parse(String s) { return new JSONParser().parse(s); } - default int size() { - throw new IllegalStateException("Size operation unsupported"); + default JSONValue get(String name) { + throw new UnsupportedOperationException("Unsupported conversion to object"); + } + + default Optional getOrAbsent(String name) { + throw new UnsupportedOperationException("Unsupported conversion to object"); + } + + default Optional valueOrNull() { + return Optional.of(this); + } + + default Map members() { + throw new UnsupportedOperationException("Unsupported conversion to object"); + } + + default List elements() { + throw new UnsupportedOperationException("Unsupported conversion to array"); + } + + default JSONValue element(int index) { + throw new UnsupportedOperationException("Unsupported conversion to array"); } default String asString() { - throw new IllegalStateException("Unsupported conversion to String"); + throw new UnsupportedOperationException("Unsupported conversion to string"); } - default JSONArray asArray() { - throw new IllegalStateException("Unsupported conversion to array"); + default int asInt() { + throw new UnsupportedOperationException("Unsupported conversion to number"); } - default JSONObject asObject() { - throw new IllegalStateException("Unsupported conversion to object"); + default long asLong() { + throw new UnsupportedOperationException("Unsupported conversion to number"); } - default JSONValue get(String field) { - return asObject().get(field); + default double asDouble() { + throw new UnsupportedOperationException("Unsupported conversion to number"); 
+ } + + default boolean asBoolean() { + throw new UnsupportedOperationException("Unsupported conversion to boolean"); } } diff --git a/test/lib/jdk/test/lib/net/IPSupport.java b/test/lib/jdk/test/lib/net/IPSupport.java index 31255e20c6a..4a77c3a9bae 100644 --- a/test/lib/jdk/test/lib/net/IPSupport.java +++ b/test/lib/jdk/test/lib/net/IPSupport.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ import java.net.InetAddress; import java.net.ProtocolFamily; import java.net.StandardProtocolFamily; import java.nio.channels.SocketChannel; +import java.util.Optional; import jtreg.SkippedException; @@ -124,13 +125,33 @@ public class IPSupport { * is non-operational */ public static void throwSkippedExceptionIfNonOperational() throws SkippedException { + Optional configurationIssue = diagnoseConfigurationIssue(); + configurationIssue.map(SkippedException::new).ifPresent(x -> { + throw x; + }); + } + + /** + * Checks that the platform supports the ability to create a + * minimally-operational socket whose protocol is either one of IPv4 + * or IPv6. + * + *

      A minimally-operation socket is one that can be created and + * bound to an IP-specific loopback address. IP support is + * considered non-operational if a socket cannot be bound to either + * one of, an IPv4 loopback address, or the IPv6 loopback address. + * + * @return Optinal with config issue or empty Optinal if no issue found + */ + public static Optional diagnoseConfigurationIssue(){ if (!currentConfigurationIsValid()) { ByteArrayOutputStream os = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(os); ps.println("Invalid networking configuration"); printPlatformSupport(ps); - throw new SkippedException(os.toString()); + return Optional.of(os.toString()); } + return Optional.empty(); } /** diff --git a/test/lib/jdk/test/lib/process/OutputAnalyzer.java b/test/lib/jdk/test/lib/process/OutputAnalyzer.java index d8b3f470260..553e13b28ff 100644 --- a/test/lib/jdk/test/lib/process/OutputAnalyzer.java +++ b/test/lib/jdk/test/lib/process/OutputAnalyzer.java @@ -56,8 +56,23 @@ public final class OutputAnalyzer { * @throws IOException If an I/O error occurs. */ public OutputAnalyzer(Process process, Charset cs) throws IOException { - buffer = OutputBuffer.of(process, cs); + this(process, cs, true); } + + /** + * Create an OutputAnalyzer, a utility class for verifying output and exit + * value from a Process, with a configurable verbosity level. + * + * @param process Process to analyze + * @param cs The charset used to convert stdout/stderr from bytes to chars + * or null for the default charset. + * @param verbose Set to false to limit logging to stdout. + * @throws IOException If an I/O error occurs. + */ + public OutputAnalyzer(Process process, Charset cs, boolean verbose) throws IOException { + buffer = OutputBuffer.of(process, cs, verbose); + } + /** * Create an OutputAnalyzer, a utility class for verifying output and exit * value from a Process @@ -66,7 +81,19 @@ public final class OutputAnalyzer { * @throws IOException If an I/O error occurs. 
*/ public OutputAnalyzer(Process process) throws IOException { - buffer = OutputBuffer.of(process); + this(process, true); + } + + /** + * Create an OutputAnalyzer, a utility class for verifying output and exit + * value from a Process, with a configurable verbosity level. + * + * @param process Process to analyze + * @param verbose Set to false to limit logging to stdout. + * @throws IOException If an I/O error occurs. + */ + public OutputAnalyzer(Process process, boolean verbose) throws IOException { + buffer = OutputBuffer.of(process, verbose); } /** diff --git a/test/lib/jdk/test/lib/process/OutputBuffer.java b/test/lib/jdk/test/lib/process/OutputBuffer.java index 0390535bf89..57e00aa73c7 100644 --- a/test/lib/jdk/test/lib/process/OutputBuffer.java +++ b/test/lib/jdk/test/lib/process/OutputBuffer.java @@ -89,12 +89,20 @@ public interface OutputBuffer { */ public long pid(); + public static OutputBuffer of(Process p, boolean quiet) { + return of(p, null, quiet); + } + public static OutputBuffer of(Process p, Charset cs) { - return new LazyOutputBuffer(p, cs); + return of(p, cs, false); } public static OutputBuffer of(Process p) { - return new LazyOutputBuffer(p, null); + return of(p, null, false); + } + + public static OutputBuffer of(Process p, Charset cs, boolean quiet) { + return new LazyOutputBuffer(p, cs, quiet); } public static OutputBuffer of(String stdout, String stderr, int exitValue) { @@ -130,19 +138,23 @@ public interface OutputBuffer { } } + private final boolean verbose; private final StreamTask outTask; private final StreamTask errTask; private final Process p; private volatile Integer exitValue; // null implies we don't yet know private final void logProgress(String state) { + if (verbose) { System.out.println("[" + Instant.now().toString() + "] " + state - + " for process " + p.pid()); + + " for process " + p.pid()); System.out.flush(); + } } - private LazyOutputBuffer(Process p, Charset cs) { + private LazyOutputBuffer(Process p, Charset cs, 
boolean verbose) { this.p = p; + this.verbose = verbose; logProgress("Gathering output"); outTask = new StreamTask(p.getInputStream(), cs); errTask = new StreamTask(p.getErrorStream(), cs); diff --git a/test/lib/jdk/test/lib/process/ProcessTools.java b/test/lib/jdk/test/lib/process/ProcessTools.java index fe9c1de9f30..e7dd20c6286 100644 --- a/test/lib/jdk/test/lib/process/ProcessTools.java +++ b/test/lib/jdk/test/lib/process/ProcessTools.java @@ -575,7 +575,7 @@ public final class ProcessTools { * "test.vm.opts" and "test.java.opts" and this method will * not do that. * - *

      If you still chose to use + *

      If you still choose to use * createLimitedTestJavaProcessBuilder() you should probably use * it in combination with @requires vm.flagless JTREG * anotation as to not waste energy and test resources. @@ -609,7 +609,7 @@ public final class ProcessTools { * "test.vm.opts" and "test.java.opts" and this method will * not do that. * - *

      If you still chose to use + *

      If you still choose to use * createLimitedTestJavaProcessBuilder() you should probably use * it in combination with @requires vm.flagless JTREG * anotation as to not waste energy and test resources. diff --git a/test/lib/jdk/test/lib/security/SecurityUtils.java b/test/lib/jdk/test/lib/security/SecurityUtils.java index be6ff1cc0e3..78dc2aa2729 100644 --- a/test/lib/jdk/test/lib/security/SecurityUtils.java +++ b/test/lib/jdk/test/lib/security/SecurityUtils.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,8 +23,10 @@ package jdk.test.lib.security; +import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; +import java.io.PrintStream; import java.nio.ByteBuffer; import java.security.KeyStore; import java.security.Security; @@ -219,5 +221,40 @@ public final class SecurityUtils { return ((m.get() & 0xFF) << 8) | (m.get() & 0xFF); } + // Helper method to run and get log. + public static String runAndGetLog(Runnable runnable) { + System.setProperty("javax.net.debug", "ssl"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintStream err = new PrintStream(baos); + PrintStream origErr = System.err; + System.setErr(err); + + runnable.run(); + err.close(); + + // Save the log output and then print it as usual. + String log = baos.toString(); + System.setErr(origErr); + System.err.print(log); + return log; + } + + // Helper method to find log messages. 
+ public static int countSubstringOccurrences(String str, String sub) { + if (str == null || sub == null || sub.isEmpty()) { + return 0; + } + + int count = 0; + int lastIndex = 0; + + while ((lastIndex = str.indexOf(sub, lastIndex)) != -1) { + count++; + lastIndex += sub.length(); + } + + return count; + } + private SecurityUtils() {} } diff --git a/test/lib/jdk/test/lib/thread/ThreadWrapper.java b/test/lib/jdk/test/lib/thread/ThreadWrapper.java new file mode 100644 index 00000000000..ab850bf5a4a --- /dev/null +++ b/test/lib/jdk/test/lib/thread/ThreadWrapper.java @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +package jdk.test.lib.thread; + +import java.time.Duration; +import java.util.Map; + +/* + The ThreadWrapper is a helper class that allows to extend + coverage of virtual threads testing for existing tests with threads. + + Specifically, it is useful for the pattern where Thread is extended + by some class. 
Example: + + class resumethrd02Thread extends Thread {...} + ... + resumethrd02Thread thr = new resumethrd02Thread(); + + The test can be updated to use this wrapper: + class resumethrd02Thread extends ThreadWrapper {...} + ... + resumethrd02Thread thr = new resumethrd02Thread(); + + So resumethrd02Thread can be run with platform or virtual threads. + + Method getThread() is used to get instance of Thread. + + It is not expected to use this wrapper for new tests or classes that + are not extending Thread. The TestThreadFactory should be used to + create threads in such cases. + */ + +public class ThreadWrapper implements Runnable { + private final Thread thread; + + @SuppressWarnings("this-escape") + public ThreadWrapper() { + // thread is a platform or virtual thread + thread = TestThreadFactory.newThread(this); + } + + @SuppressWarnings("this-escape") + public ThreadWrapper(String name) { + // thread is a platform or virtual thread + thread = TestThreadFactory.newThread(this, name); + } + + public Thread getThread() { + return thread; + } + + public static Thread currentThread() { + return Thread.currentThread(); + } + + public static void yield() { + Thread.yield(); + } + + public static void sleep(long millis) throws InterruptedException { + Thread.sleep(millis); + } + + public static void sleep(long millis, int nanos) throws InterruptedException { + Thread.sleep(millis, nanos); + } + + public static void sleep(Duration duration) throws InterruptedException { + Thread.sleep(duration); + } + + public static void onSpinWait() { + Thread.onSpinWait(); + } + + public static Thread.Builder.OfPlatform ofPlatform() { + return Thread.ofPlatform(); + } + + public static Thread.Builder.OfVirtual ofVirtual() { + return Thread.ofVirtual(); + } + + public static Thread startVirtualThread(Runnable task) { + return Thread.startVirtualThread(task); + } + + public boolean isVirtual() { + return thread.isVirtual(); + } + + public void start() { + thread.start(); + } + + public 
void run() { + } + + public void interrupt() { + thread.interrupt(); + } + + public static boolean interrupted() { + return Thread.interrupted(); + } + + public boolean isInterrupted() { + return thread.isInterrupted(); + } + + public boolean isAlive() { + return thread.isAlive(); + } + + public void setPriority(int newPriority) { + thread.setPriority(newPriority); + } + + public int getPriority() { + return thread.getPriority(); + } + + public void setName(String name) { + thread.setName(name); + } + + public String getName() { + return thread.getName(); + } + + public ThreadGroup getThreadGroup() { + return thread.getThreadGroup(); + } + + public static int activeCount() { + return Thread.activeCount(); + } + + public static int enumerate(Thread[] tarray) { + return Thread.enumerate(tarray); + } + + public void join(long millis) throws InterruptedException { + thread.join(millis); + } + + public void join(long millis, int nanos) throws InterruptedException { + thread.join(millis, nanos); + } + + public void join() throws InterruptedException { + thread.join(); + } + + public boolean join(Duration duration) throws InterruptedException { + return thread.join(duration); + } + + public static void dumpStack() { + Thread.dumpStack(); + } + + public void setDaemon(boolean on) { + thread.setDaemon(on); + } + + public boolean isDaemon() { + return thread.isDaemon(); + } + + @Override + public String toString() { + return thread.toString(); + } + + public ClassLoader getContextClassLoader() { + return thread.getContextClassLoader(); + } + + public void setContextClassLoader(ClassLoader cl) { + thread.setContextClassLoader(cl); + } + + public static boolean holdsLock(Object obj) { + return Thread.holdsLock(obj); + } + + public StackTraceElement[] getStackTrace() { + return thread.getStackTrace(); + } + + public static Map getAllStackTraces() { + return Thread.getAllStackTraces(); + } + + @Deprecated(since = "19") + public long getId() { + return thread.getId(); + } + + 
public long threadId() { + return thread.threadId(); + } + + public Thread.State getState() { + return thread.getState(); + } + + public static void setDefaultUncaughtExceptionHandler(Thread.UncaughtExceptionHandler ueh) { + Thread.setDefaultUncaughtExceptionHandler(ueh); + } + + public static Thread.UncaughtExceptionHandler getDefaultUncaughtExceptionHandler() { + return Thread.getDefaultUncaughtExceptionHandler(); + } + + public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { + return thread.getUncaughtExceptionHandler(); + } + + public void setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler ueh) { + thread.setUncaughtExceptionHandler(ueh); + } +} diff --git a/test/lib/jdk/test/lib/threaddump/ThreadDump.java b/test/lib/jdk/test/lib/threaddump/ThreadDump.java index ca728e625fc..77dd3dd8c03 100644 --- a/test/lib/jdk/test/lib/threaddump/ThreadDump.java +++ b/test/lib/jdk/test/lib/threaddump/ThreadDump.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,13 +32,17 @@ import java.util.Map; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; import jdk.test.lib.json.JSONValue; /** * Represents a thread dump that is obtained by parsing JSON text. A thread dump in JSON * format is generated with the {@code com.sun.management.HotSpotDiagnosticMXBean} API or - * using {@code jcmd Thread.dump_to_file -format=json }. + * using {@code jcmd Thread.dump_to_file -format=json }. The thread dump + * format is documented in {@code + * src/jdk.management/share/classes/com/sun/management/doc-files/threadDump.schema.json}. * *

      The following is an example thread dump that is parsed by this class. Many of the * objects are collapsed to reduce the size. @@ -46,9 +50,10 @@ import jdk.test.lib.json.JSONValue; *

      {@code
        * {
        *   "threadDump": {
      - *     "processId": "63406",
      - *     "time": "2022-05-20T07:37:16.308017Z",
      - *     "runtimeVersion": "19",
      + *     "formatVersion": 2,
      + *     "processId": 63406,
      + *     "time": "2026-03-25T09:20:08.591503Z",
      + *     "runtimeVersion": "27",
        *     "threadContainers": [
        *       {
        *         "container": "",
      @@ -56,12 +61,12 @@ import jdk.test.lib.json.JSONValue;
        *         "owner": null,
        *         "threads": [
        *          {
      - *            "tid": "1",
      + *            "tid": 1,
        *            "name": "main",
        *            "stack": [...]
        *          },
        *          {
      - *            "tid": "8",
      + *            "tid": 8,
        *            "name": "Reference Handler",
        *            "state": "RUNNABLE",
        *            "stack": [
      @@ -76,21 +81,21 @@ import jdk.test.lib.json.JSONValue;
        *          {"name": "Monitor Ctrl-Break"...},
        *          {"name": "Notification Thread"...}
        *         ],
      - *         "threadCount": "7"
      + *         "threadCount": 7
        *       },
        *       {
        *         "container": "ForkJoinPool.commonPool\/jdk.internal.vm.SharedThreadContainer@56aac163",
        *         "parent": "",
        *         "owner": null,
        *         "threads": [...],
      - *         "threadCount": "1"
      + *         "threadCount": 1
        *       },
        *       {
        *         "container": "java.util.concurrent.ThreadPoolExecutor@20322d26\/jdk.internal.vm.SharedThreadContainer@184f6be2",
        *         "parent": "",
        *         "owner": null,
        *         "threads": [...],
      - *         "threadCount": "1"
      + *         "threadCount": 1
        *       }
        *     ]
        *   }
      @@ -149,14 +154,6 @@ public final class ThreadDump {
                   children.add(container);
               }
       
      -        /**
      -         * Returns the value of a property of this thread container, as a string.
      -         */
      -        private String getStringProperty(String propertyName) {
      -            JSONValue value = containerObj.get(propertyName);
      -            return (value != null) ? value.asString() : null;
      -        }
      -
               /**
                * Returns the thread container name.
                */
      @@ -168,10 +165,10 @@ public final class ThreadDump {
                * Return the thread identifier of the owner or empty OptionalLong if not owned.
                */
               public OptionalLong owner() {
      -            String owner = getStringProperty("owner");
      -            return (owner != null)
      -                    ? OptionalLong.of(Long.parseLong(owner))
      -                    : OptionalLong.empty();
      +            return containerObj.get("owner")  // number or null
      +                    .valueOrNull()
      +                    .map(v -> OptionalLong.of(v.asLong()))
      +                    .orElse(OptionalLong.empty());
               }
       
               /**
      @@ -192,12 +189,10 @@ public final class ThreadDump {
                * Returns a stream of {@code ThreadInfo} objects for the threads in this container.
                */
               public Stream threads() {
      -            JSONValue.JSONArray threadsObj = containerObj.get("threads").asArray();
      -            Set threadInfos = new HashSet<>();
      -            for (JSONValue threadObj : threadsObj) {
      -                threadInfos.add(new ThreadInfo(threadObj));
      -            }
      -            return threadInfos.stream();
      +            return containerObj.get("threads")
      +                    .elements()
      +                    .stream()
      +                    .map(ThreadInfo::new);
               }
       
               /**
      @@ -237,29 +232,10 @@ public final class ThreadDump {
               private final JSONValue threadObj;
       
               ThreadInfo(JSONValue threadObj) {
      -            this.tid = Long.parseLong(threadObj.get("tid").asString());
      +            this.tid = threadObj.get("tid").asLong();
                   this.threadObj = threadObj;
               }
       
      -        /**
      -         * Returns the value of a property of this thread object, as a string.
      -         */
      -        private String getStringProperty(String propertyName) {
      -            JSONValue value = threadObj.get(propertyName);
      -            return (value != null) ? value.asString() : null;
      -        }
      -
      -        /**
      -         * Returns the value of a property of an object in this thread object, as a string.
      -         */
      -        private String getStringProperty(String objectName, String propertyName) {
      -            if (threadObj.get(objectName) instanceof JSONValue.JSONObject obj
      -                    && obj.get(propertyName) instanceof JSONValue value) {
      -                return value.asString();
      -            }
      -            return null;
      -        }
      -
               /**
                * Returns the thread identifier.
                */
      @@ -271,83 +247,92 @@ public final class ThreadDump {
                * Returns the thread name.
                */
               public String name() {
      -            return getStringProperty("name");
      +            return threadObj.get("name").asString();
               }
       
               /**
                * Returns the thread state.
                */
               public String state() {
      -            return getStringProperty("state");
      +            return threadObj.get("state").asString();
               }
       
               /**
                * Returns true if virtual thread.
                */
               public boolean isVirtual() {
      -            String s = getStringProperty("virtual");
      -            return (s != null) ? Boolean.parseBoolean(s) : false;
      +            return threadObj.getOrAbsent("virtual")
      +                    .map(JSONValue::asBoolean)
      +                    .orElse(false);
               }
       
               /**
      -         * Returns the thread's parkBlocker.
      +         * Returns the thread's parkBlocker or null.
                */
               public String parkBlocker() {
      -            return getStringProperty("parkBlocker", "object");
      +            return threadObj.getOrAbsent("parkBlocker")
      +                    .map(v -> v.get("object").asString())
      +                    .orElse(null);
               }
       
               /**
                * Returns the owner of the parkBlocker if the parkBlocker is an AbstractOwnableSynchronizer.
                */
               public OptionalLong parkBlockerOwner() {
      -            String owner = getStringProperty("parkBlocker", "owner");
      -            return (owner != null)
      -                    ? OptionalLong.of(Long.parseLong(owner))
      -                    : OptionalLong.empty();
      +            return threadObj.getOrAbsent("parkBlocker")
      +                    .map(v -> OptionalLong.of(v.get("owner").asLong()))
      +                    .orElse(OptionalLong.empty());
               }
       
               /**
      -         * Returns the object that the thread is blocked entering its monitor.
+         * Returns the object whose monitor the thread is blocked entering, or null.
                */
               public String blockedOn() {
      -            return getStringProperty("blockedOn");
      +            return threadObj.getOrAbsent("blockedOn")
      +                    .map(JSONValue::asString)
      +                    .orElse(null);
               }
       
               /**
      -         * Return the object that is the therad is waiting on with Object.wait.
+         * Return the object that the thread is waiting on with Object.wait, or null.
                */
               public String waitingOn() {
      -            return getStringProperty("waitingOn");
      +            return threadObj.getOrAbsent("waitingOn")
      +                    .map(JSONValue::asString)
      +                    .orElse(null);
               }
       
               /**
                * Returns the thread stack.
                */
               public Stream stack() {
      -            JSONValue.JSONArray stackObj = threadObj.get("stack").asArray();
      -            List stack = new ArrayList<>();
      -            for (JSONValue steObject : stackObj) {
      -                stack.add(steObject.asString());
      -            }
      -            return stack.stream();
      +            return threadObj.get("stack")
      +                    .elements()
      +                    .stream()
      +                    .map(JSONValue::asString);
               }
       
               /**
                * Return a map of monitors owned.
                */
               public Map> ownedMonitors() {
      -            Map> ownedMonitors = new HashMap<>();
      -            JSONValue monitorsOwnedObj = threadObj.get("monitorsOwned");
      -            if (monitorsOwnedObj != null) {
      -                for (JSONValue obj : monitorsOwnedObj.asArray()) {
      -                    int depth = Integer.parseInt(obj.get("depth").asString());
      -                    for (JSONValue lock : obj.get("locks").asArray()) {
      -                        ownedMonitors.computeIfAbsent(depth, _ -> new ArrayList<>())
      -                                .add(lock.asString());
      -                    }
      -                }
      -            }
      -            return ownedMonitors;
      +            Map> result = new HashMap<>();
      +            threadObj.getOrAbsent("monitorsOwned")
      +                    .map(JSONValue::elements)
      +                    .orElse(List.of())
      +                    .forEach(e -> {
      +                        int depth = e.get("depth").asInt();
      +                        List locks = e.get("locks")
      +                                .elements()
      +                                .stream()
      +                                .map(v -> v.valueOrNull()  // string or null
      +                                        .map(JSONValue::asString)
      +                                        .orElse(null))
      +                                .toList();
      +                        result.computeIfAbsent(depth, _ -> new ArrayList<>()).addAll(locks);
      +                    });
      +
      +            return result;
               }
       
               /**
      @@ -355,10 +340,9 @@ public final class ThreadDump {
                * its carrier.
                */
               public OptionalLong carrier() {
      -            String s = getStringProperty("carrier");
      -            return (s != null)
      -                    ? OptionalLong.of(Long.parseLong(s))
      -                    : OptionalLong.empty();
      +            return threadObj.getOrAbsent("carrier")
      +                    .map(v -> OptionalLong.of(v.asLong()))
      +                    .orElse(OptionalLong.empty());
               }
       
               @Override
      @@ -388,33 +372,24 @@ public final class ThreadDump {
               }
           }
       
      -    /**
      -     * Returns the value of a property of this thread dump, as a string.
      -     */
      -    private String getStringProperty(String propertyName) {
      -        JSONValue value = threadDumpObj.get(propertyName);
      -        return (value != null) ? value.asString() : null;
      -    }
      -
           /**
            * Returns the value of threadDump/processId.
            */
           public long processId() {
      -        return Long.parseLong(getStringProperty("processId"));
      -    }
      +        return threadDumpObj.get("processId").asLong(); }
       
           /**
            * Returns the value of threadDump/time.
            */
           public String time() {
      -        return getStringProperty("time");
      +        return threadDumpObj.get("time").asString();
           }
       
           /**
            * Returns the value of threadDump/runtimeVersion.
            */
           public String runtimeVersion() {
      -        return getStringProperty("runtimeVersion");
      +        return threadDumpObj.get("runtimeVersion").asString();
           }
       
           /**
      @@ -447,35 +422,53 @@ public final class ThreadDump {
            */
           public static ThreadDump parse(String json) {
               JSONValue threadDumpObj = JSONValue.parse(json).get("threadDump");
      +        int formatVersion = threadDumpObj.get("formatVersion").asInt();
      +        if (formatVersion != 2) {
      +            fail("Format " + formatVersion + " not supported");
      +        }
       
               // threadContainers array, preserve insertion order (parents are added before children)
      -        Map containerObjs = new LinkedHashMap<>();
      -        JSONValue threadContainersObj = threadDumpObj.get("threadContainers");
      -        for (JSONValue containerObj : threadContainersObj.asArray()) {
      -            String name = containerObj.get("container").asString();
      -            containerObjs.put(name, containerObj);
      -        }
      +        Map containerObjs = threadDumpObj.get("threadContainers")
      +                .elements()
      +                .stream()
      +                .collect(Collectors.toMap(
      +                        c -> c.get("container").asString(),
      +                        Function.identity(),
      +                        (a, b) -> { fail("Duplicate container"); return null; },
      +                        LinkedHashMap::new
      +                ));
       
               // find root and create tree of thread containers
               ThreadContainer root = null;
               Map map = new HashMap<>();
               for (String name : containerObjs.keySet()) {
                   JSONValue containerObj = containerObjs.get(name);
      -            String parentName = containerObj.get("parent").asString();
      -            if (parentName == null) {
      +            JSONValue parentObj = containerObj.get("parent");
      +            if (parentObj instanceof JSONValue.JSONNull) {
      +                if (root != null) {
      +                    fail("More than one root container");
      +                }
                       root = new ThreadContainer(name, null, containerObj);
                       map.put(name, root);
                   } else {
      -                var parent = map.get(parentName);
      +                String parentName = parentObj.asString();
      +                ThreadContainer parent = map.get(parentName);
                       if (parent == null) {
      -                    throw new RuntimeException("Thread container " + name + " found before " + parentName);
      +                    fail("Thread container " + name + " found before " + parentName);
                       }
                       var container = new ThreadContainer(name, parent, containerObj);
                       parent.addChild(container);
                       map.put(name, container);
                   }
               }
      +        if (root == null) {
      +            fail("No root container");
      +        }
       
               return new ThreadDump(root, map, threadDumpObj);
           }
      -}
      \ No newline at end of file
      +
      +    private static void fail(String message) {
      +        throw new RuntimeException(message);
      +    }
      +}
      diff --git a/test/lib/jdk/test/whitebox/code/BlobType.java b/test/lib/jdk/test/whitebox/code/BlobType.java
      index a2290acc7b6..e2d57f79484 100644
      --- a/test/lib/jdk/test/whitebox/code/BlobType.java
      +++ b/test/lib/jdk/test/whitebox/code/BlobType.java
      @@ -46,8 +46,16 @@ public enum BlobType {
                           || type == BlobType.MethodNonProfiled;
               }
           },
      +    MethodHot(2, "CodeHeap 'hot nmethods'", "HotCodeHeapSize") {
      +        @Override
      +        public boolean allowTypeWhenOverflow(BlobType type) {
      +            return super.allowTypeWhenOverflow(type)
      +                    || type == BlobType.MethodNonProfiled
      +                    || type == BlobType.MethodProfiled;
      +        }
      +    },
           // Non-nmethods like Buffers, Adapters and Runtime Stubs
      -    NonNMethod(2, "CodeHeap 'non-nmethods'", "NonNMethodCodeHeapSize") {
      +    NonNMethod(3, "CodeHeap 'non-nmethods'", "NonNMethodCodeHeapSize") {
               @Override
               public boolean allowTypeWhenOverflow(BlobType type) {
                   return super.allowTypeWhenOverflow(type)
      @@ -56,7 +64,7 @@ public enum BlobType {
               }
           },
           // All types (No code cache segmentation)
      -    All(3, "CodeCache", "ReservedCodeCacheSize");
      +    All(4, "CodeCache", "ReservedCodeCacheSize");
       
           public final int id;
           public final String sizeOptionName;
      @@ -99,6 +107,10 @@ public enum BlobType {
                   // there is no MethodProfiled in non tiered world or pure C1
                   result.remove(MethodProfiled);
               }
      +
      +        if (Long.valueOf(0).equals(whiteBox.getVMFlag("HotCodeHeapSize"))) {
      +            result.remove(MethodHot);
      +        }
               return result;
           }
       
      diff --git a/test/lib/native/testlib_thread_barriers.h b/test/lib/native/testlib_thread_barriers.h
      new file mode 100644
      index 00000000000..8e0b717b57a
      --- /dev/null
      +++ b/test/lib/native/testlib_thread_barriers.h
      @@ -0,0 +1,83 @@
      +/*
      + * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2026, IBM Corp.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + */
      +
      +#ifndef TESTLIB_THREAD_BARRIERS_H
      +#define TESTLIB_THREAD_BARRIERS_H
      +
+/* macOS does not have pthread barriers; implement a fallback using condvars. */
      +
      +#ifndef _WIN32
      +#if !defined _POSIX_BARRIERS || _POSIX_BARRIERS < 0
      +
      +#include 
      +
      +#define PTHREAD_BARRIER_SERIAL_THREAD       1
      +
      +#define pthread_barrier_t                       barr_t
      +#define pthread_barrier_init(barr, attr, need)  barr_init(barr, attr, need)
      +#define pthread_barrier_destroy(barr)           barr_destroy(barr)
      +#define pthread_barrier_wait(barr)              barr_wait(barr)
      +
      +typedef struct {
      +    pthread_mutex_t mutex;
      +    pthread_cond_t cond;
      +    int have, need, trigger_count;
      +} barr_t;
      +
      +int barr_init(barr_t* b, void* ignored, int need) {
      +    b->have = b->trigger_count = 0;
      +    b->need = need;
      +    pthread_mutex_init(&b->mutex, NULL);
      +    pthread_cond_init(&b->cond, NULL);
      +    return 0;
      +}
      +
      +int barr_destroy(barr_t* b) {
      +    pthread_mutex_destroy(&b->mutex);
      +    pthread_cond_destroy(&b->cond);
      +    return 0;
      +}
      +
      +int barr_wait(barr_t* b) {
      +    pthread_mutex_lock(&b->mutex);
      +    int my_trigger_count = b->trigger_count;
      +    b->have++;
      +    if (b->have == b->need) {
      +        b->have = 0;
      +        b->trigger_count++;
      +        pthread_cond_broadcast(&b->cond);
      +        pthread_mutex_unlock(&b->mutex);
      +        return PTHREAD_BARRIER_SERIAL_THREAD;
      +    }
      +    while (my_trigger_count == b->trigger_count) { // no spurious wakeups
      +        pthread_cond_wait(&b->cond, &b->mutex);
      +    }
      +    pthread_mutex_unlock(&b->mutex);
      +    return 0;
      +}
      +
      +#endif // !_POSIX_BARRIERS
      +#endif // !_WIN32
      +
      +#endif // TESTLIB_THREAD_BARRIERS_H
      diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/StringLoopJmhBenchmark.java b/test/micro/org/openjdk/bench/java/lang/foreign/StringLoopJmhBenchmark.java
      new file mode 100644
      index 00000000000..1733b73886e
      --- /dev/null
      +++ b/test/micro/org/openjdk/bench/java/lang/foreign/StringLoopJmhBenchmark.java
      @@ -0,0 +1,122 @@
      +/*
      + * Copyright (c) 2026, Google LLC. All rights reserved.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + */
      +package org.openjdk.bench.java.lang.foreign;
      +
      +import java.nio.charset.StandardCharsets;
      +import java.util.concurrent.TimeUnit;
      +import org.openjdk.jmh.annotations.Benchmark;
      +import org.openjdk.jmh.annotations.BenchmarkMode;
      +import org.openjdk.jmh.annotations.Fork;
      +import org.openjdk.jmh.annotations.Measurement;
      +import org.openjdk.jmh.annotations.Mode;
      +import org.openjdk.jmh.annotations.OutputTimeUnit;
      +import org.openjdk.jmh.annotations.Param;
      +import org.openjdk.jmh.annotations.Scope;
      +import org.openjdk.jmh.annotations.Setup;
      +import org.openjdk.jmh.annotations.Warmup;
      +import org.openjdk.jmh.annotations.State;
      +
      +@Warmup(time = 1, timeUnit = TimeUnit.SECONDS)
      +@Measurement(time = 1, timeUnit = TimeUnit.SECONDS)
      +@Fork(1)
      +@State(Scope.Benchmark)
      +public class StringLoopJmhBenchmark {
      +  @Param({"10", "100", "1000", "100000"})
      +  int stringLength;
      +
      +  @Param({"ASCII", "LATIN1", "UTF16"})
      +  String encoding;
      +
      +  String stringData;
      +
      +  @Setup
      +  public void setUp() {
      +    stringData = "";
      +
      +    // Character at the _end_ to affect if we hit
      +    // - ASCII = compact strings and compatible with UTF-8
      +    // - LATIN1 = compact strings but not compatible with UTF-8
      +    // - UTF16 = 2-byte char storage and not compatible with UTF-8
      +    String c;
      +    if (encoding.equals("ASCII")) {
      +      c = "a";
      +    } else if (encoding.equals("LATIN1")) {
      +      c = "\u00C4";
      +    } else if (encoding.equals("UTF16")) {
      +      c = "\u2603";
      +    } else {
      +      throw new IllegalArgumentException("Unknown encoding: " + encoding);
      +    }
      +
      +    var stringDataBuilder = new StringBuilder(stringLength + 1);
      +    while (stringDataBuilder.length() < stringLength) {
      +      stringDataBuilder.append((char) (Math.random() * 26) + 'a');
      +    }
      +    stringData = stringDataBuilder.append(c).toString();
      +  }
      +
      +  @Benchmark
      +  public int utf8LenByLoop() {
      +    final String s = stringData;
      +    final int len = s.length();
      +
      +    // ASCII prefix strings.
      +    int idx = 0;
      +    for (char c; idx < len && (c = s.charAt(idx)) < 0x80; ++idx) {}
      +
      +    // Entire string was ASCII.
      +    if (idx == len) {
      +      return len;
      +    }
      +
      +    int utf8Len = len;
      +    for (char c; idx < len; ++idx) {
      +      c = s.charAt(idx);
      +      if (c < 0x80) {
      +        utf8Len++;
      +      } else if (c < 0x800) {
      +        utf8Len += 2;
      +      } else {
      +        utf8Len += 3;
      +        if (Character.isSurrogate(c)) {
      +          int cp = Character.codePointAt(s, idx);
      +          if (cp < Character.MIN_SUPPLEMENTARY_CODE_POINT) {
      +            throw new RuntimeException("Unpaired surrogate");
      +          }
      +          idx++;
      +        }
      +      }
      +    }
      +    return utf8Len;
      +  }
      +
      +  @Benchmark
      +  public int getBytes() throws Exception {
      +    return stringData.getBytes(StandardCharsets.UTF_8).length;
      +  }
      +
      +  @Benchmark
      +  public int encodedLength() throws Exception {
      +    return stringData.encodedLength(StandardCharsets.UTF_8);
      +  }
      +}
      diff --git a/test/micro/org/openjdk/bench/java/net/URLToString.java b/test/micro/org/openjdk/bench/java/net/URLToString.java
      new file mode 100644
      index 00000000000..38f6500c573
      --- /dev/null
      +++ b/test/micro/org/openjdk/bench/java/net/URLToString.java
      @@ -0,0 +1,77 @@
      +/*
      + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + */
      +package org.openjdk.bench.java.net;
      +
      +import org.openjdk.jmh.annotations.*;
      +
      +import java.net.MalformedURLException;
      +import java.net.URI;
      +import java.net.URL;
      +import java.util.concurrent.TimeUnit;
      +
      +/**
      + * Tests java.net.URL.toString performance
      + */
      +@BenchmarkMode(Mode.AverageTime)
      +@OutputTimeUnit(TimeUnit.NANOSECONDS)
      +@State(Scope.Thread)
      +@Warmup(iterations = 5, time = 1)
      +@Measurement(iterations = 5, time = 1)
      +@Fork(value = 3)
      +public class URLToString {
      +
      +    @Param({"false", "true"})
      +    boolean auth;
      +
      +    @Param({"false", "true"})
      +    boolean query;
      +
      +    @Param({"false", "true"})
      +    boolean ref;
      +
      +    private URL url;
      +
      +    @Setup()
      +    public void setup() throws MalformedURLException {
      +        StringBuilder sb = new StringBuilder();
      +        if (auth) {
      +            sb.append("http://hostname");
      +        } else {
      +            sb.append("file:");
      +        }
      +        sb.append("/some/long/path/to/jar/app-1.0.jar!/org/summerframework/samples/horseclinic/HorseClinicApplication.class");
      +        if (query) {
      +            sb.append("?param=value");
      +        }
      +        if (ref) {
      +            sb.append("#fragment");
      +        }
      +
      +        url = URI.create(sb.toString()).toURL();
      +    }
      +
      +    @Benchmark
      +    public String urlToString() {
      +        return url.toString();
      +    }
      +}
      diff --git a/test/micro/org/openjdk/bench/java/nio/CharsetCanEncode.java b/test/micro/org/openjdk/bench/java/nio/CharsetCanEncode.java
      index ebfbc217a95..8c08a876696 100644
      --- a/test/micro/org/openjdk/bench/java/nio/CharsetCanEncode.java
      +++ b/test/micro/org/openjdk/bench/java/nio/CharsetCanEncode.java
      @@ -65,6 +65,9 @@ public class CharsetCanEncode {
           // sun.nio.cs.UTF_16LE
           private CharsetEncoder utf16le = Charset.forName("UTF-16LE").newEncoder();
       
      +    // sun.nio.cs.UTF_32LE
      +    private CharsetEncoder utf32le = Charset.forName("UTF-32LE").newEncoder();
      +
           @Benchmark
           public boolean asciiCanEncodeCharYes() {
               return ascii.canEncode('D');
      @@ -184,4 +187,24 @@ public class CharsetCanEncode {
           public boolean utf16leCanEncodeStringNo() {
               return utf16le.canEncode(String.valueOf(Character.MIN_SURROGATE));
           }
      +
      +    @Benchmark
      +    public boolean utf32leCanEncodeCharYes() {
      +        return utf32le.canEncode('D');
      +    }
      +
      +    @Benchmark
      +    public boolean utf32leCanEncodeStringYes() {
      +        return utf32le.canEncode("D");
      +    }
      +
      +    @Benchmark
      +    public boolean utf32leCanEncodeCharNo() {
      +        return utf32le.canEncode(Character.MIN_SURROGATE);
      +    }
      +
      +    @Benchmark
      +    public boolean utf32leCanEncodeStringNo() {
      +        return utf32le.canEncode(String.valueOf(Character.MIN_SURROGATE));
      +    }
       }
      diff --git a/test/micro/org/openjdk/bench/java/nio/file/FilesCopy.java b/test/micro/org/openjdk/bench/java/nio/file/FilesCopy.java
      new file mode 100644
      index 00000000000..9472920f071
      --- /dev/null
      +++ b/test/micro/org/openjdk/bench/java/nio/file/FilesCopy.java
      @@ -0,0 +1,68 @@
      +/*
      + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + */
      +
      +package org.openjdk.bench.java.nio.file;
      +
      +import java.io.IOException;
      +import java.nio.ByteBuffer;
      +import java.nio.channels.FileChannel;
      +import java.nio.file.Files;
      +import java.nio.file.Path;
      +import static java.nio.file.StandardOpenOption.*;
      +
      +import org.openjdk.jmh.annotations.Benchmark;
      +import org.openjdk.jmh.annotations.Scope;
      +import org.openjdk.jmh.annotations.Setup;
      +import org.openjdk.jmh.annotations.State;
      +import org.openjdk.jmh.annotations.TearDown;
      +
      +@State(Scope.Benchmark)
      +public class FilesCopy {
      +
      +    private static final int SIZE = Integer.MAX_VALUE;
      +    private static final Path FILE = Path.of("file.dat");
      +    private static final Path COPY = Path.of("copy.dat");
      +
      +    @Setup
      +    public void init() throws IOException {
      +        Files.deleteIfExists(FILE);
      +        Files.deleteIfExists(COPY);
      +        try (FileChannel fc = FileChannel.open(FILE, CREATE_NEW, READ, WRITE)) {
      +            fc.position(SIZE);
      +            fc.write(ByteBuffer.wrap(new byte[] {(byte)27}));
      +        }
      +    }
      +
      +    @TearDown
      +    public void cleanup() throws IOException {
      +        Files.deleteIfExists(FILE);
      +        Files.deleteIfExists(COPY);
      +    }
      +
      +    @Benchmark
      +    public void copyFile() throws IOException {
      +        Files.copy(FILE, COPY);
      +        Files.delete(COPY);
      +    }
      +
      +}
      diff --git a/test/micro/org/openjdk/bench/java/util/Base64EncodeToString.java b/test/micro/org/openjdk/bench/java/util/Base64EncodeToString.java
      new file mode 100644
      index 00000000000..5b259230341
      --- /dev/null
      +++ b/test/micro/org/openjdk/bench/java/util/Base64EncodeToString.java
      @@ -0,0 +1,55 @@
      +/*
      + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + */
+package org.openjdk.bench.java.util;
      +
      +import org.openjdk.jmh.annotations.*;
      +
      +import java.util.Base64;
      +import java.util.Random;
      +import java.util.concurrent.TimeUnit;
      +
      +@State(Scope.Benchmark)
      +@Warmup(iterations = 5, time = 2)
      +@Measurement(iterations = 5, time = 2)
      +@Fork(value = 2)
      +@OutputTimeUnit(TimeUnit.MILLISECONDS)
      +public class Base64EncodeToString {
      +
      +    private byte[] input;
      +
      +    @Param({"10", "100", "1000", "10000"})
      +    private int inputSize;
      +
      +    @Setup
      +    public void setup() {
      +        Random r = new Random(1123);
      +        input = new byte[inputSize];
      +        r.nextBytes(input);
      +    }
      +
      +    @Benchmark
      +    public String testEncodeToString() {
      +        return Base64.getEncoder().encodeToString(input);
      +    }
      +}
      +
      diff --git a/test/micro/org/openjdk/bench/jdk/incubator/vector/Float16OperationsBenchmark.java b/test/micro/org/openjdk/bench/jdk/incubator/vector/Float16OperationsBenchmark.java
      index cbfe9958924..daf18af528e 100644
      --- a/test/micro/org/openjdk/bench/jdk/incubator/vector/Float16OperationsBenchmark.java
      +++ b/test/micro/org/openjdk/bench/jdk/incubator/vector/Float16OperationsBenchmark.java
      @@ -1,5 +1,6 @@
       /*
      - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
      + * Copyright 2026 Arm Limited and/or its affiliates.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -28,6 +29,7 @@ import jdk.incubator.vector.*;
       import org.openjdk.jmh.annotations.*;
       import static jdk.incubator.vector.Float16.*;
       import static java.lang.Float.*;
      +import java.util.Random;
       
       @OutputTimeUnit(TimeUnit.MILLISECONDS)
       @State(Scope.Thread)
      @@ -45,11 +47,20 @@ public class Float16OperationsBenchmark {
           short [] vector5;
           boolean [] vectorPredicate;
       
      +    private int c0, c1, c2, s1, s2;
      +
      +    Random r;
      +
           static final short f16_one = Float.floatToFloat16(1.0f);
           static final short f16_two = Float.floatToFloat16(2.0f);
       
           @Setup(Level.Trial)
           public void BmSetup() {
      +        r = new Random();
      +
      +        c1 = s1 = step();
      +        c2 = vectorDim - (s2 = step());
      +
               rexp      = new int[vectorDim];
               vectorRes = new short[vectorDim];
               vector1   = new short[vectorDim];
      @@ -84,6 +95,16 @@ public class Float16OperationsBenchmark {
               );
           }
       
      +    private int step() {
      +        return (r.nextInt() & 0xf) + 1;
      +    }
      +
      +    private void inc() {
      +        c1 = c1 + s1 < vectorDim ? c1 + s1 : (s1 = step());
      +        c2 = c2 - s2 > 0 ? c2 - s2 : vectorDim - (s2 = step());
      +        c0 = Math.abs(c2 - c1);
      +    }
      +
           @Benchmark
           public void addBenchmark() {
               for (int i = 0; i < vectorDim; i++) {
      @@ -200,6 +221,14 @@ public class Float16OperationsBenchmark {
               }
           }
       
      +    @Benchmark
      +    public void maxScalarBenchmark() {
      +        for (int i = 0; i < vectorDim; i++) {
      +            inc(); // Ensures no auto-vectorization
      +            vectorRes[c0] = float16ToRawShortBits(max(shortBitsToFloat16(vector1[c1]), shortBitsToFloat16(vector2[c2])));
      +        }
      +    }
      +
           @Benchmark
           public void minBenchmark() {
               for (int i = 0; i < vectorDim; i++) {
      @@ -207,6 +236,14 @@ public class Float16OperationsBenchmark {
               }
           }
       
      +    @Benchmark
      +    public void minScalarBenchmark() {
      +        for (int i = 0; i < vectorDim; i++) {
      +            inc(); // Ensures no auto-vectorization
      +            vectorRes[c0] = float16ToRawShortBits(min(shortBitsToFloat16(vector1[c1]), shortBitsToFloat16(vector2[c2])));
      +        }
      +    }
      +
           @Benchmark
           public void sqrtBenchmark() {
               for (int i = 0; i < vectorDim; i++) {
      @@ -314,4 +351,22 @@ public class Float16OperationsBenchmark {
               }
               return distRes;
           }
      +
      +    @Benchmark
      +    public short reductionAddFP16() {
      +        short result = (short) 0;
      +        for (int i = 0; i < vectorDim; i++) {
      +            result = float16ToRawShortBits(add(shortBitsToFloat16(result), shortBitsToFloat16(vector1[i])));
      +        }
      +        return result;
      +    }
      +
      +    @Benchmark
      +    public short reductionMulFP16() {
      +        short result = floatToFloat16(1.0f);
      +        for (int i = 0; i < vectorDim; i++) {
      +            result = float16ToRawShortBits(multiply(shortBitsToFloat16(result), shortBitsToFloat16(vector1[i])));
      +        }
      +        return result;
      +    }
       }
      diff --git a/test/micro/org/openjdk/bench/jdk/incubator/vector/VectorStoreMaskBenchmark.java b/test/micro/org/openjdk/bench/jdk/incubator/vector/VectorStoreMaskBenchmark.java
      new file mode 100644
      index 00000000000..d4dc321ccba
      --- /dev/null
      +++ b/test/micro/org/openjdk/bench/jdk/incubator/vector/VectorStoreMaskBenchmark.java
      @@ -0,0 +1,84 @@
      +/*
      + *  Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
      + *  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + *  This code is free software; you can redistribute it and/or modify it
      + *  under the terms of the GNU General Public License version 2 only, as
      + *  published by the Free Software Foundation.
      + *
      + *  This code is distributed in the hope that it will be useful, but WITHOUT
      + *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + *  version 2 for more details (a copy is included in the LICENSE file that
      + *  accompanied this code).
      + *
      + *  You should have received a copy of the GNU General Public License version
      + *  2 along with this work; if not, write to the Free Software Foundation,
      + *  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + *  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + *  or visit www.oracle.com if you need additional information or have any
      + *  questions.
      + *
      + */
      +
      +package org.openjdk.bench.jdk.incubator.vector;
      +
      +import jdk.incubator.vector.*;
      +import java.util.concurrent.TimeUnit;
      +import org.openjdk.jmh.annotations.*;
      +
      +@OutputTimeUnit(TimeUnit.MICROSECONDS)
      +@State(Scope.Thread)
      +@Warmup(iterations = 10, time = 1)
      +@Measurement(iterations = 10, time = 1)
      +@Fork(value = 1, jvmArgs = {"--add-modules=jdk.incubator.vector"})
      +public class VectorStoreMaskBenchmark {
      +    static final int LENGTH = 256;
      +    static final boolean[] mask_arr_input = new boolean[LENGTH];
      +    static final boolean[] mask_arr_output = new boolean[LENGTH];
      +    static {
      +        for (int i = 0; i < LENGTH; i++) {
      +            mask_arr_input[i] = (i & 1) == 0;
      +        }
      +    }
      +
      +    @CompilerControl(CompilerControl.Mode.INLINE)
+    public void maskLoadCastStoreKernel(VectorSpecies species_from, VectorSpecies species_to) {
      +        for (int i = 0; i < LENGTH; i += species_from.length()) {
      +            VectorMask mask_from = VectorMask.fromArray(species_from, mask_arr_input, i);
      +            VectorMask mask_to = mask_from.cast(species_to);
      +            mask_to.intoArray(mask_arr_output, i);
      +        }
      +    }
      +
      +    @Benchmark
      +    public void microMaskLoadCastStoreByte64() {
      +        maskLoadCastStoreKernel(ByteVector.SPECIES_64, ShortVector.SPECIES_128);
      +    }
      +
      +    @Benchmark
      +    public void microMaskLoadCastStoreShort64() {
      +        maskLoadCastStoreKernel(ShortVector.SPECIES_64, IntVector.SPECIES_128);
      +    }
      +
      +    @Benchmark
      +    public void microMaskLoadCastStoreInt128() {
      +        maskLoadCastStoreKernel(IntVector.SPECIES_128, ShortVector.SPECIES_64);
      +    }
      +
      +    @Benchmark
      +    public void microMaskLoadCastStoreLong128() {
      +        maskLoadCastStoreKernel(LongVector.SPECIES_128, IntVector.SPECIES_64);
      +    }
      +
      +    @Benchmark
      +    public void microMaskLoadCastStoreFloat128() {
      +        maskLoadCastStoreKernel(FloatVector.SPECIES_128, ShortVector.SPECIES_64);
      +    }
      +
      +    @Benchmark
      +    public void microMaskLoadCastStoreDouble128() {
      +        maskLoadCastStoreKernel(DoubleVector.SPECIES_128, IntVector.SPECIES_64);
      +    }
+}
      diff --git a/test/micro/org/openjdk/bench/vm/compiler/FpMinMaxIntrinsics.java b/test/micro/org/openjdk/bench/vm/compiler/FpMinMaxIntrinsics.java
      index 27ae2214157..62c33f5fafe 100644
      --- a/test/micro/org/openjdk/bench/vm/compiler/FpMinMaxIntrinsics.java
      +++ b/test/micro/org/openjdk/bench/vm/compiler/FpMinMaxIntrinsics.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -45,14 +45,15 @@ public class FpMinMaxIntrinsics {
           private Random r = new Random();
       
           private static int stride = 1;
      -    private static float acc;
      +    private static float f_acc;
      +    private static double d_acc;
       
           @Setup
           public void init() {
               c1 = s1 = step();
               c2 = COUNT - (s2 = step());
       
      -        for (int i=0; i SPECIES_I    = IntVector.SPECIES_PREFERRED;
     private static final VectorSpecies<Integer> SPECIES_I512 = IntVector.SPECIES_512;
     private static final VectorSpecies<Integer> SPECIES_I256 = IntVector.SPECIES_256;
+    private static final VectorSpecies<Integer> SPECIES_I128 = IntVector.SPECIES_128;
+    private static final VectorSpecies<Integer> SPECIES_I64  = IntVector.SPECIES_64;
     private static final VectorSpecies<Byte> SPECIES_B       = ByteVector.SPECIES_PREFERRED;
     private static final VectorSpecies<Byte> SPECIES_B64     = ByteVector.SPECIES_64;
     private static final VectorSpecies<Float> SPECIES_F      = FloatVector.SPECIES_PREFERRED;
      @@ -58,15 +61,31 @@ public class VectorAlgorithmsImpl {
               public int[] rI2;
               public int[] rI3;
               public int[] rI4;
      -        public int[] eI;
      +        public int[] rI5;
      +
      +        // Search element for "findI"
      +        public int[] eI_findI;
               // The test has to use the same index into eI for all implementations. But in the
               // benchmark, we'd like to use random indices, so we use the index to advance through
               // the array.
      -        public int eI_idx = 0;
      +        public int eI_findI_idx = 0;
      +
      +        // Data and threshold eI value for "filterI".
      +        // We create the data in a range, and then pick a threshold scaled to that range,
      +        // so that the branch in the filter is branchProbability.
      +        public int[] aI_filterI;
      +        public int eI_filterI;
       
               public float[] aF;
               public float[] bF;
       
      +        // Input for piece-wise functions.
      +        // Uniform [0..1[ with probability p and Uniform [1..2[ with probability (1-p)
      +        public float[] xF;
      +        public float[] rF1;
      +        public float[] rF2;
      +        public float[] rF3;
      +
               public byte[] aB;
               public byte[] strB;
               public byte[] rB1;
      @@ -76,7 +95,16 @@ public class VectorAlgorithmsImpl {
               public int[] oopsX4;
               public int[] memX4;
       
      -        public Data(int size, int seed, int numX4Objects) {
      +        // Input for mismatchB
      +        // We set m1B and m2B to have identical data, temporarily edit m2B at one position,
      +        // run the mismatch implementation, and then reset that position. This means we
      +        // perform as little mutation while randomizing the input data.
      +        public byte[] m1B;
      +        public byte[] m2B;
      +        public int[] mismatchB_idx;
      +        public int mismatchB_idx_idx = 0;
      +
      +        public Data(int size, int seed, int numX4Objects, float branchProbability) {
                   Random random = new Random(seed);
       
                   // int: one input array and multiple output arrays so different implementations can
      @@ -86,14 +114,20 @@ public class VectorAlgorithmsImpl {
                   rI2 = new int[size];
                   rI3 = new int[size];
                   rI4 = new int[size];
      +            rI5 = new int[size];
                   Arrays.setAll(aI, i -> random.nextInt());
       
                   // Populate with some random values from aI, and some totally random values.
      -            eI = new int[0x10000];
      -            for (int i = 0; i < eI.length; i++) {
      -                eI[i] = (random.nextInt(10) == 0) ? random.nextInt() : aI[random.nextInt(size)];
      +            eI_findI = new int[0x10000];
      +            for (int i = 0; i < eI_findI.length; i++) {
      +                eI_findI[i] = (random.nextInt(10) == 0) ? random.nextInt() : aI[random.nextInt(size)];
                   }
       
      +            int filterI_range = 1000_000;
      +            aI_filterI = new int[size];
      +            Arrays.setAll(aI_filterI, i -> random.nextInt(filterI_range));
      +            eI_filterI = (int)(filterI_range * (1.0f - branchProbability));
      +
                   // X4 oop setup.
                   // oopsX4 holds "addresses" (i.e. indices), that point to the 16-byte objects in memX4.
                   oopsX4 = new int[size];
      @@ -117,14 +151,54 @@ public class VectorAlgorithmsImpl {
                       bF[i] = random.nextInt(32) - 16;
                   }
       
      +            xF = new float[size];
      +            rF1 = new float[size];
      +            rF2 = new float[size];
      +            rF3 = new float[size];
      +            for (int i = 0; i < size; i++) {
      +                xF[i] = (random.nextFloat() < branchProbability)
      +                        ? 0f + random.nextFloat()
      +                        : 1f + random.nextFloat();
      +            }
      +
                   // byte: just random data.
                   aB = new byte[size];
      -            strB = new byte[size];
                   rB1 = new byte[size];
                   rB2 = new byte[size];
                   rB3 = new byte[size];
                   random.nextBytes(aB);
      -            random.nextBytes(strB); // TODO: special data!
      +
      +            // byte string: for lowerCase benchmark.
      +            strB = new byte[size];
      +            for (int i = 0; i < size; i++) {
      +                strB[i] = (random.nextFloat() < branchProbability)
      +                          ? (byte)(random.nextInt(16) + 'A')
      +                          : (byte)(random.nextInt(16) + 'a');
      +            }
      +
      +            // Input data for mismatchB
      +            m1B = new byte[size];
      +            m2B = new byte[size];
      +            random.nextBytes(m1B);
      +            System.arraycopy(m1B, 0, m2B, 0, size);
      +
      +            mismatchB_idx = new int[0x10000];
      +            for (int i = 0; i < mismatchB_idx.length; i++) {
      +                // Sometimes make no mutation (-1), sometimes pick index for mutation.
      +                mismatchB_idx[i] = (random.nextInt(10) == 0) ? -1 : random.nextInt(m1B.length);
      +            }
      +        }
      +
      +        public interface MismatchBImpl {
      +            int run(byte[] a, byte[] b);
      +        }
      +
      +        public int wrap_mismatchB(int idx, MismatchBImpl impl) {
      +            int i = mismatchB_idx[idx & 0xffff];
      +            if (i != -1) { m2B[i]++; }
      +            int res = impl.run(m1B, m2B);
      +            if (i != -1) { m2B[i]--; }
      +            return res;
               }
           }
       
      @@ -308,6 +382,21 @@ public class VectorAlgorithmsImpl {
               return sum;
           }
       
      +    public static float dotProductF_VectorAPI_fma(float[] a, float[] b) {
      +        var sums = FloatVector.broadcast(SPECIES_F, 0.0f);
      +        int i;
      +        for (i = 0; i < SPECIES_F.loopBound(a.length); i += SPECIES_F.length()) {
      +            var va = FloatVector.fromArray(SPECIES_F, a, i);
      +            var vb = FloatVector.fromArray(SPECIES_F, b, i);
      +            sums = va.fma(vb, sums);
      +        }
      +        float sum = sums.reduceLanes(VectorOperators.ADD);
      +        for (; i < a.length; i++) {
      +            sum = Math.fma(a[i], b[i], sum);
      +        }
      +        return sum;
      +    }
      +
           public static int hashCodeB_loop(byte[] a) {
               int h = 1;
               for (int i = 0; i < a.length; i++) {
      @@ -616,6 +705,44 @@ public class VectorAlgorithmsImpl {
               return -1;
           }
       
      +    public static int mismatchB_loop(byte[] a, byte[] b) {
      +        for (int i = 0; i < a.length; i++) {
      +            if (a[i] != b[i]) {
      +                return i;
      +            }
      +        }
      +        return -1;
      +    }
      +
      +    public static int mismatchB_Arrays(byte[] a, byte[] b) {
      +        return Arrays.mismatch(a, b);
      +    }
      +
      +    public static int mismatchB_MemorySegment(byte[] a, byte[] b) {
      +        var aMS = MemorySegment.ofArray(a);
      +        var bMS = MemorySegment.ofArray(b);
      +        return (int) aMS.mismatch(bMS);
      +    }
      +
      +    public static int mismatchB_VectorAPI(byte[] a, byte[] b) {
      +        int i = 0;
      +        for (; i < SPECIES_B.loopBound(a.length); i += SPECIES_B.length()) {
      +            ByteVector va = ByteVector.fromArray(SPECIES_B, a, i);
      +            ByteVector vb = ByteVector.fromArray(SPECIES_B, b, i);
      +            var mask = va.compare(VectorOperators.NE, vb);
      +            if (mask.anyTrue()) {
      +                return i + mask.firstTrue();
      +            }
      +        }
      +        for (; i < a.length; i++) {
      +            if (a[i] != b[i]) {
      +                return i;
      +            }
      +        }
      +        return -1;
      +    }
      +
      +
           public static Object reverseI_loop(int[] a, int[] r) {
               for (int i = 0; i < a.length; i++) {
                   r[a.length - i - 1] = a[i];
      @@ -651,13 +778,12 @@ public class VectorAlgorithmsImpl {
               return r;
           }
       
      -    public static Object filterI_VectorAPI(int[] a, int[] r, int threshold) {
      -        var thresholds = IntVector.broadcast(SPECIES_I, threshold);
      +    public static Object filterI_VectorAPI_v1(int[] a, int[] r, int threshold) {
               int j = 0;
               int i = 0;
               for (; i < SPECIES_I.loopBound(a.length); i += SPECIES_I.length()) {
                   IntVector v = IntVector.fromArray(SPECIES_I, a, i);
      -            var mask = v.compare(VectorOperators.GE, thresholds);
      +            var mask = v.compare(VectorOperators.GE, threshold);
                   v = v.compress(mask);
                   int trueCount = mask.trueCount();
                   var prefixMask = mask.compress();
      @@ -676,6 +802,98 @@ public class VectorAlgorithmsImpl {
               return r;
           }
       
      +    // Idea: on platforms that do not support the "v1" solution with "compress" and
      +    //       masked stores, we struggle to deal with the loop-carried dependency of j.
      +    //       But we can still use dynamic uniformity to enable some vectorized performance.
      +    public static Object filterI_VectorAPI_v2_l2(int[] a, int[] r, int threshold) {
      +        int j = 0;
      +        int i = 0;
      +        for (; i < SPECIES_I64.loopBound(a.length); i += SPECIES_I64.length()) {
      +            IntVector v = IntVector.fromArray(SPECIES_I64, a, i);
      +            var mask = v.compare(VectorOperators.GE, threshold);
      +            if (mask.allTrue()) {
      +                v.intoArray(r, j);
      +                j += 2;
      +            } else if (mask.anyTrue()) {
      +                if (mask.laneIsSet(0)) { r[j++] = v.lane(0); }
      +                if (mask.laneIsSet(1)) { r[j++] = v.lane(1); }
      +            } else {
      +                // nothing
      +            }
      +        }
      +        for (; i < a.length; i++) {
      +            int ai = a[i];
      +            if (ai >= threshold) {
      +                r[j++] = ai;
      +            }
      +        }
      +        // Just force the resulting length onto the same array.
      +        r[r.length - 1] = j;
      +        return r;
      +    }
      +
      +    public static Object filterI_VectorAPI_v2_l4(int[] a, int[] r, int threshold) {
      +        int j = 0;
      +        int i = 0;
      +        for (; i < SPECIES_I128.loopBound(a.length); i += SPECIES_I128.length()) {
      +            IntVector v = IntVector.fromArray(SPECIES_I128, a, i);
      +            var mask = v.compare(VectorOperators.GE, threshold);
      +            if (mask.allTrue()) {
      +                v.intoArray(r, j);
      +                j += 4;
      +            } else if (mask.anyTrue()) {
      +                if (mask.laneIsSet(0)) { r[j++] = v.lane(0); }
      +                if (mask.laneIsSet(1)) { r[j++] = v.lane(1); }
      +                if (mask.laneIsSet(2)) { r[j++] = v.lane(2); }
      +                if (mask.laneIsSet(3)) { r[j++] = v.lane(3); }
      +            } else {
      +                // nothing
      +            }
      +        }
      +        for (; i < a.length; i++) {
      +            int ai = a[i];
      +            if (ai >= threshold) {
      +                r[j++] = ai;
      +            }
      +        }
      +        // Just force the resulting length onto the same array.
      +        r[r.length - 1] = j;
      +        return r;
      +    }
      +
      +    public static Object filterI_VectorAPI_v2_l8(int[] a, int[] r, int threshold) {
      +        int j = 0;
      +        int i = 0;
      +        for (; i < SPECIES_I256.loopBound(a.length); i += SPECIES_I256.length()) {
      +            IntVector v = IntVector.fromArray(SPECIES_I256, a, i);
      +            var mask = v.compare(VectorOperators.GE, threshold);
      +            if (mask.allTrue()) {
      +                v.intoArray(r, j);
      +                j += 8;
      +            } else if (mask.anyTrue()) {
      +                if (mask.laneIsSet(0)) { r[j++] = v.lane(0); }
      +                if (mask.laneIsSet(1)) { r[j++] = v.lane(1); }
      +                if (mask.laneIsSet(2)) { r[j++] = v.lane(2); }
      +                if (mask.laneIsSet(3)) { r[j++] = v.lane(3); }
      +                if (mask.laneIsSet(4)) { r[j++] = v.lane(4); }
      +                if (mask.laneIsSet(5)) { r[j++] = v.lane(5); }
      +                if (mask.laneIsSet(6)) { r[j++] = v.lane(6); }
      +                if (mask.laneIsSet(7)) { r[j++] = v.lane(7); }
      +            } else {
      +                // nothing
      +            }
      +        }
      +        for (; i < a.length; i++) {
      +            int ai = a[i];
      +            if (ai >= threshold) {
      +                r[j++] = ai;
      +            }
      +        }
      +        // Just force the resulting length onto the same array.
      +        r[r.length - 1] = j;
      +        return r;
      +    }
      +
           // X4: ints simulate 4-byte oops.
           // oops: if non-zero (= non-null), every entry simulates a 4-byte oop, pointing into mem.
           // mem: an int array that simulates the memory.
      @@ -771,5 +989,176 @@ public class VectorAlgorithmsImpl {
               }
               return r;
           }
      -}
       
      +    public static int conditionalSumB_loop(byte[] a) {
      +        int sum = 0;
      +        for (int i = 0; i < a.length; i++) {
      +            byte c = a[i];
      +            if (c >= 'A' && c <= 'Z') {
      +                sum += c;
      +            }
      +        }
      +        return sum;
      +    }
      +
      +    public static int conditionalSumB_VectorAPI_v1(byte[] a) {
      +        return ConditionalSumB_VectorAPI_V1.compute(a);
      +    }
      +
      +    private static class ConditionalSumB_VectorAPI_V1 {
      +        // Pick I species to be a full vector, and the B vector a quarter its bit length.
      +        // However, we have to get at least 64bits for the B vector, so at least 256bits
      +        // for the int vector - a sad restriction by the currently very narrow range of
      +        // supported shapes.
      +        private static final int BITS_I = Math.max(256, IntVector.SPECIES_PREFERRED.vectorBitSize());
      +        private static final int BITS_B = BITS_I / 4;
      +        private static final VectorShape SHAPE_I = VectorShape.forBitSize(BITS_I);
      +        private static final VectorShape SHAPE_B = VectorShape.forBitSize(BITS_B);
      +        private static final VectorSpecies SPECIES_I = SHAPE_I.withLanes(int.class);
      +        private static final VectorSpecies    SPECIES_B = SHAPE_B.withLanes(byte.class);
      +
      +        public static int compute(byte[] a) {
      +            var zeroB = ByteVector.zero(SPECIES_B);
      +            var accI = IntVector.zero(SPECIES_I);
      +            int i;
      +            for (i = 0; i < SPECIES_B.loopBound(a.length); i += SPECIES_B.length()) {
      +                var vB = ByteVector.fromArray(SPECIES_B, a, i);
      +                var maskA = vB.compare(VectorOperators.GE, (byte)'A');
      +                var maskZ = vB.compare(VectorOperators.LE, (byte)'Z');
      +                var mask = maskA.and(maskZ);
      +                vB = zeroB.blend(vB, mask);
      +                var vI = vB.castShape(SPECIES_I, 0);
      +                accI = accI.add(vI);
      +            }
      +            int sum = accI.reduceLanes(VectorOperators.ADD);
      +            for (; i < a.length; i++) {
      +                byte c = a[i];
      +                if (c >= 'A' && c <= 'Z') {
      +                    sum += c;
      +                }
      +            }
      +            return sum;
      +        }
      +    }
      +
      +    public static int conditionalSumB_VectorAPI_v2(byte[] a) {
      +        return ConditionalSumB_VectorAPI_V2.compute(a);
      +    }
      +
      +    private static class ConditionalSumB_VectorAPI_V2 {
      +        // Pick B species to be a full vector, and use 4 I vectors of the same bit size.
      +        private static final VectorSpecies    SPECIES_B = ByteVector.SPECIES_PREFERRED;
      +        private static final VectorSpecies SPECIES_I = SPECIES_B.vectorShape().withLanes(int.class);
      +
      +        public static int compute(byte[] a) {
      +            var zeroB = ByteVector.zero(SPECIES_B);
      +            var accI = IntVector.zero(SPECIES_I);
      +            int i;
      +            for (i = 0; i < SPECIES_B.loopBound(a.length); i += SPECIES_B.length()) {
      +                var vB = ByteVector.fromArray(SPECIES_B, a, i);
      +                var maskA = vB.compare(VectorOperators.GE, (byte)'A');
      +                var maskZ = vB.compare(VectorOperators.LE, (byte)'Z');
      +                var mask = maskA.and(maskZ);
      +                vB = zeroB.blend(vB, mask);
      +                // When casting byte->int, we get 4x the bits, and split them into 4 parts.
      +                var vI0 = vB.castShape(SPECIES_I, 0);
      +                var vI1 = vB.castShape(SPECIES_I, 1);
      +                var vI2 = vB.castShape(SPECIES_I, 2);
      +                var vI3 = vB.castShape(SPECIES_I, 3);
      +                accI = accI.add(vI0.add(vI1).add(vI2).add(vI3));
      +            }
      +            int sum = accI.reduceLanes(VectorOperators.ADD);
      +            for (; i < a.length; i++) {
      +                byte c = a[i];
      +                if (c >= 'A' && c <= 'Z') {
      +                    sum += c;
      +                }
      +            }
      +            return sum;
      +        }
      +    }
      +
      +    public static float[] pieceWise2FunctionF_loop(float[] a, float[] r) {
      +        for (int i = 0; i < a.length; i++) {
      +            float ai = a[i];
      +            if (ai < 1f) {
      +                float a2 = ai * ai;
      +                float a4 = a2 * a2;
      +                float a8 = a4 * a4;
      +                r[i] = a8;
      +            } else {
      +                float s2 = (float)Math.sqrt(ai);
      +                float s4 = (float)Math.sqrt(s2);
      +                float s8 = (float)Math.sqrt(s4);
      +                r[i] = s8;
      +            }
      +        }
      +        return r;
      +    }
      +
      +    public static float[] pieceWise2FunctionF_VectorAPI_v1(float[] a, float[] r) {
      +        int i;
      +        for (i = 0; i < SPECIES_F.loopBound(a.length); i += SPECIES_F.length()) {
      +            var ai = FloatVector.fromArray(SPECIES_F, a, i);
      +            var mask = ai.compare(VectorOperators.LT, 1f);
      +            var a2 = ai.lanewise(VectorOperators.MUL, ai);
      +            var a4 = a2.lanewise(VectorOperators.MUL, a2);
      +            var a8 = a4.lanewise(VectorOperators.MUL, a4);
      +            var s2 = ai.lanewise(VectorOperators.SQRT);
      +            var s4 = s2.lanewise(VectorOperators.SQRT);
      +            var s8 = s4.lanewise(VectorOperators.SQRT);
      +            var v = s8.blend(a8, mask);
      +            v.intoArray(r, i);
      +        }
      +        for (; i < a.length; i++) {
      +            float ai = a[i];
      +            if (ai < 1f) {
      +                float a2 = ai * ai;
      +                float a4 = a2 * a2;
      +                float a8 = a4 * a4;
      +                r[i] = a8;
      +            } else {
      +                float s2 = (float)Math.sqrt(ai);
      +                float s4 = (float)Math.sqrt(s2);
      +                float s8 = (float)Math.sqrt(s4);
      +                r[i] = s8;
      +            }
      +        }
      +        return r;
      +    }
      +
      +    public static float[] pieceWise2FunctionF_VectorAPI_v2(float[] a, float[] r) {
      +        int i;
      +        for (i = 0; i < SPECIES_F.loopBound(a.length); i += SPECIES_F.length()) {
      +            var ai = FloatVector.fromArray(SPECIES_F, a, i);
      +            var mask = ai.compare(VectorOperators.LT, 1f);
      +            var a2 = ai.lanewise(VectorOperators.MUL, ai);
      +            var a4 = a2.lanewise(VectorOperators.MUL, a2);
      +            var a8 = a4.lanewise(VectorOperators.MUL, a4);
      +            var v = a8;
      +            // SQRT is expensive, so only call it if necessary
      +            if (!mask.allTrue()) {
      +                var s2 = ai.lanewise(VectorOperators.SQRT);
      +                var s4 = s2.lanewise(VectorOperators.SQRT);
      +                var s8 = s4.lanewise(VectorOperators.SQRT);
      +                v = s8.blend(a8, mask);
      +            }
      +            v.intoArray(r, i);
      +        }
      +        for (; i < a.length; i++) {
      +            float ai = a[i];
      +            if (ai < 1f) {
      +                float a2 = ai * ai;
      +                float a4 = a2 * a2;
      +                float a8 = a4 * a4;
      +                r[i] = a8;
      +            } else {
      +                float s2 = (float)Math.sqrt(ai);
      +                float s4 = (float)Math.sqrt(s2);
      +                float s8 = (float)Math.sqrt(s4);
      +                r[i] = s8;
      +            }
      +        }
      +        return r;
      +    }
      +}
      diff --git a/test/micro/org/openjdk/bench/vm/compiler/VectorReduction2.java b/test/micro/org/openjdk/bench/vm/compiler/VectorReduction2.java
      index 9241aca1dad..0d11705c8ec 100644
      --- a/test/micro/org/openjdk/bench/vm/compiler/VectorReduction2.java
      +++ b/test/micro/org/openjdk/bench/vm/compiler/VectorReduction2.java
      @@ -1,5 +1,6 @@
       /*
        * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
      + * Copyright 2026 Arm Limited and/or its affiliates.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -27,6 +28,7 @@ import org.openjdk.jmh.infra.*;
       
       import java.util.concurrent.TimeUnit;
       import java.util.Random;
      +import jdk.incubator.vector.Float16;
       
       /**
        * Note: there is a corresponding IR test:
      @@ -64,6 +66,9 @@ public abstract class VectorReduction2 {
           private double[] in1D;
           private double[] in2D;
           private double[] in3D;
      +    private short[] in1F16;
      +    private short[] in2F16;
      +    private short[] in3F16;
       
           @Param("0")
           private int seed;
      @@ -96,6 +101,9 @@ public abstract class VectorReduction2 {
               in1D = new double[SIZE];
               in2D = new double[SIZE];
               in3D = new double[SIZE];
      +        in1F16 = new short[SIZE];
      +        in2F16 = new short[SIZE];
      +        in3F16 = new short[SIZE];
       
               for (int i = 0; i < SIZE; i++) {
                   in1B[i] = (byte)r.nextInt();
      @@ -121,6 +129,9 @@ public abstract class VectorReduction2 {
                   in1D[i] = r.nextDouble();
                   in2D[i] = r.nextDouble();
                   in3D[i] = r.nextDouble();
      +            in1F16[i] = Float.floatToFloat16(r.nextFloat());
      +            in2F16[i] = Float.floatToFloat16(r.nextFloat());
      +            in3F16[i] = Float.floatToFloat16(r.nextFloat());
               }
           }
       
      @@ -1449,10 +1460,86 @@ public abstract class VectorReduction2 {
               bh.consume(acc);
           }
       
      -    @Fork(value = 1, jvmArgs = {"-XX:+UseSuperWord"})
      +    // ---------float16***Simple ------------------------------------------------------------
      +    @Benchmark
      +    public void float16AddSimple(Blackhole bh) {
      +        short acc = (short)0; // neutral element
      +        for (int i = 0; i < SIZE; i++) {
      +            acc = Float16.float16ToRawShortBits(
      +                    Float16.add(Float16.shortBitsToFloat16(acc), Float16.shortBitsToFloat16(in1F16[i])));
      +        }
      +        bh.consume(acc);
      +    }
      +
      +    @Benchmark
      +    public void float16MulSimple(Blackhole bh) {
      +        short acc = Float.floatToFloat16(1.0f); // neutral element
      +        for (int i = 0; i < SIZE; i++) {
      +            acc = Float16.float16ToRawShortBits(
      +                    Float16.multiply(Float16.shortBitsToFloat16(acc), Float16.shortBitsToFloat16(in1F16[i])));
      +        }
      +        bh.consume(acc);
      +    }
      +
      +    // ---------float16***DotProduct ------------------------------------------------------------
      +    @Benchmark
      +    public void float16AddDotProduct(Blackhole bh) {
      +        short acc = (short)0; // neutral element
      +        for (int i = 0; i < SIZE; i++) {
      +            Float16 val = Float16.multiply(Float16.shortBitsToFloat16(in1F16[i]),
      +                                           Float16.shortBitsToFloat16(in2F16[i]));
      +            acc = Float16.float16ToRawShortBits(
      +                    Float16.add(Float16.shortBitsToFloat16(acc), val));
      +        }
      +        bh.consume(acc);
      +    }
      +
      +    @Benchmark
      +    public void float16MulDotProduct(Blackhole bh) {
      +        short acc = Float.floatToFloat16(1.0f); // neutral element
      +        for (int i = 0; i < SIZE; i++) {
      +            Float16 val = Float16.multiply(Float16.shortBitsToFloat16(in1F16[i]),
      +                                           Float16.shortBitsToFloat16(in2F16[i]));
      +            acc = Float16.float16ToRawShortBits(
      +                    Float16.multiply(Float16.shortBitsToFloat16(acc), val));
      +        }
      +        bh.consume(acc);
      +    }
      +
      +    // ---------float16***Big ------------------------------------------------------------
      +    @Benchmark
      +    public void float16AddBig(Blackhole bh) {
      +        short acc = (short)0; // neutral element
      +        for (int i = 0; i < SIZE; i++) {
      +            Float16 a = Float16.shortBitsToFloat16(in1F16[i]);
      +            Float16 b = Float16.shortBitsToFloat16(in2F16[i]);
      +            Float16 c = Float16.shortBitsToFloat16(in3F16[i]);
      +            Float16 val = Float16.add(Float16.multiply(a, b),
      +                                      Float16.add(Float16.multiply(a, c), Float16.multiply(b, c)));
      +            acc = Float16.float16ToRawShortBits(
      +                    Float16.add(Float16.shortBitsToFloat16(acc), val));
      +        }
      +        bh.consume(acc);
      +    }
      +
      +    @Benchmark
      +    public void float16MulBig(Blackhole bh) {
      +        short acc = Float.floatToFloat16(1.0f); // neutral element
      +        for (int i = 0; i < SIZE; i++) {
      +            Float16 a = Float16.shortBitsToFloat16(in1F16[i]);
      +            Float16 b = Float16.shortBitsToFloat16(in2F16[i]);
      +            Float16 c = Float16.shortBitsToFloat16(in3F16[i]);
      +            Float16 val = Float16.add(Float16.multiply(a, b),
      +                                      Float16.add(Float16.multiply(a, c), Float16.multiply(b, c)));
      +            acc = Float16.float16ToRawShortBits(
      +                    Float16.multiply(Float16.shortBitsToFloat16(acc), val));
      +        }
      +        bh.consume(acc);
      +    }
      +
      +    @Fork(value = 1, jvmArgs = {"--add-modules=jdk.incubator.vector", "-XX:+UseSuperWord"})
           public static class WithSuperword extends VectorReduction2 {}
       
      -    @Fork(value = 1, jvmArgs = {"-XX:-UseSuperWord"})
      +    @Fork(value = 1, jvmArgs = {"--add-modules=jdk.incubator.vector", "-XX:-UseSuperWord"})
           public static class NoSuperword extends VectorReduction2 {}
       }
      -
      diff --git a/test/micro/org/openjdk/bench/vm/gc/GCPatchingNmethodCost.java b/test/micro/org/openjdk/bench/vm/gc/GCPatchingNmethodCost.java
      new file mode 100644
      index 00000000000..53fa2378ead
      --- /dev/null
      +++ b/test/micro/org/openjdk/bench/vm/gc/GCPatchingNmethodCost.java
      @@ -0,0 +1,206 @@
      +/*
      + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + *
      + */
      +
      +package org.openjdk.bench.vm.gc;
      +
      +import java.lang.reflect.Method;
      +import java.util.*;
      +import java.util.concurrent.TimeUnit;
      +
      +import org.openjdk.jmh.annotations.Benchmark;
      +import org.openjdk.jmh.annotations.BenchmarkMode;
      +import org.openjdk.jmh.annotations.CompilerControl;
      +import org.openjdk.jmh.annotations.Fork;
      +import org.openjdk.jmh.annotations.Level;
      +import org.openjdk.jmh.annotations.Mode;
      +import org.openjdk.jmh.annotations.Measurement;
      +import org.openjdk.jmh.annotations.OutputTimeUnit;
      +import org.openjdk.jmh.annotations.Param;
      +import org.openjdk.jmh.annotations.Scope;
      +import org.openjdk.jmh.annotations.Setup;
      +import org.openjdk.jmh.annotations.State;
      +import org.openjdk.jmh.annotations.Warmup;
      +
      +import org.openjdk.bench.util.InMemoryJavaCompiler;
      +
      +import jdk.test.whitebox.WhiteBox;
      +import jdk.test.whitebox.code.NMethod;
      +
      +/*
      + * Nmethods have OOPs and GC barriers embedded into their code.
      + * GCs patch them which causes invalidation of nmethods' code.
      + *
      + * This benchmark can be used to estimate the cost of patching
      + * OOPs and GC barriers.
      + *
      + * We create 5000 nmethods which access fields of a class.
      + * We measure the time of different GC cycles to see
      + * the impact of patching nmethods.
      + *
      + * The benchmark parameters are method count and accessed field count.
      + */
      +
      +@BenchmarkMode(Mode.SingleShotTime)
      +@OutputTimeUnit(TimeUnit.MILLISECONDS)
      +@State(Scope.Benchmark)
      +@Fork(value = 1, jvmArgsAppend = {
      +    "-XX:+UnlockDiagnosticVMOptions",
      +    "-XX:+UnlockExperimentalVMOptions",
      +    "-XX:+WhiteBoxAPI",
      +    "-Xbootclasspath/a:lib-test/wb.jar",
      +    "-XX:-UseCodeCacheFlushing"
      +})
      +@Warmup(iterations = 5)
      +@Measurement(iterations = 5)
      +public class GCPatchingNmethodCost {
      +
      +    private static final int COMP_LEVEL = 1;
      +    private static final String FIELD_USER = "FieldUser";
      +
      +    public static Fields fields;
      +
      +    private static TestMethod[] methods = {};
      +    private static byte[] BYTE_CODE;
      +    private static WhiteBox WB;
      +
      +    @Param({"5000"})
      +    public int methodCount;
      +
      +    @Param({"0", "2", "4", "8"})
      +    public int accessedFieldCount;
      +
      +    public static class Fields {
      +        public String f1;
      +        public String f2;
      +        public String f3;
      +        public String f4;
      +        public String f5;
      +        public String f6;
      +        public String f7;
      +        public String f8;
      +        public String f9;
      +    }
      +
      +    private static final class TestMethod {
      +        private final Method method;
      +
      +        public TestMethod(Method method) throws Exception {
      +            this.method = method;
      +            WB.testSetDontInlineMethod(method, true);
      +        }
      +
      +        public void profile() throws Exception {
      +            method.invoke(null);
      +            WB.markMethodProfiled(method);
      +        }
      +
      +        public void invoke() throws Exception {
      +            method.invoke(null);
      +        }
      +
      +        public void compile() throws Exception {
      +            WB.enqueueMethodForCompilation(method, COMP_LEVEL);
      +            while (WB.isMethodQueuedForCompilation(method)) {
      +                Thread.onSpinWait();
      +            }
      +            if (WB.getMethodCompilationLevel(method) != COMP_LEVEL) {
      +                throw new IllegalStateException("Method " + method + " is not compiled at the compilation level: " + COMP_LEVEL + ". Got: " + WB.getMethodCompilationLevel(method));
      +            }
      +        }
      +
      +        public NMethod getNMethod() {
      +            return NMethod.get(method, false);
      +        }
      +    }
      +
      +    private static ClassLoader createClassLoader() {
      +        return new ClassLoader() {
      +            @Override
      +            public Class loadClass(String name) throws ClassNotFoundException {
      +                if (!name.equals(FIELD_USER)) {
      +                    return super.loadClass(name);
      +                }
      +
      +                return defineClass(name, BYTE_CODE, 0, BYTE_CODE.length);
      +            }
      +        };
      +    }
      +
      +    private static void createTestMethods(int accessedFieldCount, int count) throws Exception {
      +        String javaCode = "public class " + FIELD_USER + " {";
      +        String field = GCPatchingNmethodCost.class.getName() + ".fields.f";
      +        javaCode += "public static void accessFields() {";
      +        for (int i = 1; i <= accessedFieldCount; i++) {
      +            javaCode += field + i + "= " + field + i + " + " + i + ";";
      +        }
      +        javaCode += "}}";
      +
      +        BYTE_CODE = InMemoryJavaCompiler.compile(FIELD_USER, javaCode);
      +
      +        fields = new Fields();
      +
      +        methods = new TestMethod[count];
      +        for (int i = 0; i < count; i++) {
      +            var cl = createClassLoader().loadClass(FIELD_USER);
      +            Method method = cl.getMethod("accessFields");
      +            methods[i] = new TestMethod(method);
      +            methods[i].profile();
      +            methods[i].compile();
      +        }
      +    }
      +
      +    private static void initWhiteBox() {
      +        WB = WhiteBox.getWhiteBox();
      +    }
      +
      +    @Setup(Level.Trial)
      +    public void setupCodeCache() throws Exception {
      +        initWhiteBox();
      +        createTestMethods(accessedFieldCount, methodCount);
      +        System.gc();
      +    }
      +
      +    @Setup(Level.Iteration)
      +    public void setupIteration() {
      +        fields = new Fields();
      +    }
      +
      +    @Benchmark
      +    public void youngGC() throws Exception {
      +        fields = null;
      +        WB.youngGC();
      +    }
      +
      +    @Benchmark
      +    public void fullGC() throws Exception {
      +        fields = null;
      +        WB.fullGC();
      +    }
      +
      +    @Benchmark
      +    public void systemGC() throws Exception {
      +        fields = null;
      +        System.gc();
      +    }
      +}
      diff --git a/test/setup_aot/HelloWorld.java b/test/setup_aot/HelloWorld.java
      new file mode 100644
      index 00000000000..e243dfb4e8e
      --- /dev/null
      +++ b/test/setup_aot/HelloWorld.java
      @@ -0,0 +1,29 @@
      +/*
      + * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
      + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
      + *
      + * This code is free software; you can redistribute it and/or modify it
      + * under the terms of the GNU General Public License version 2 only, as
      + * published by the Free Software Foundation.
      + *
      + * This code is distributed in the hope that it will be useful, but WITHOUT
      + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      + * version 2 for more details (a copy is included in the LICENSE file that
      + * accompanied this code).
      + *
      + * You should have received a copy of the GNU General Public License version
      + * 2 along with this work; if not, write to the Free Software Foundation,
      + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      + *
      + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
      + * or visit www.oracle.com if you need additional information or have any
      + * questions.
      + *
      + */
      +
      +public class HelloWorld {
      +    public static void main(String args[]) {
      +        System.out.println("HelloWorld");
      +    }
      +}