Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-28 12:09:14 +00:00
Compare commits
73 Commits
The 73 commits, by SHA1 (author, date, and message columns were not captured):

f4ddafcd70, 9f78c71f88, d011d7c7cc, ffc6d1b74b, 90d80f99bb, f3bdee89ed,
283da4ddcb, ee30afae74, aae9f9269a, da47f7a27c, 1bf35d7bd0, a45364a28b,
17351930ac, 58be8702d8, 2a2b704d9c, d87c05ca8d, 2da14e26e9, 31775fd27f,
f2ca96e8d1, a90f4b6e57, e8f5d2f4f7, 7317343d47, 2d267303de, 10d97c5e6e,
e0f1c3a746, 9ba5d6f8e7, 09817940f6, 867f4620db, 08215aed7b, 5964a12adc,
d5140f2a16, 09f0076ef7, d0a3ba9db5, 93675e6e04, 32134656df, ebe8974556,
3103fa08bb, 25462ba45d, a07e0771c7, 46025e45c0, d8a1c1d04c, 6950503dcf,
646037dd92, 535e8bea1a, 3db9a5affe, cece06f1aa, 8e0d736b13, 6d9e91f886,
d5dde3fc90, 1ec4ff54ae, 2dc6c491bf, 17b6eb45e5, a611399a3f, fb7d25d73f,
d4cf30517e, 509ca63371, a2111b0ca6, fb8365e75e, d94b2a1181, 248519db4a,
b8b4493117, c46e6350aa, d9bc822168, 85eb6b752e, b0ad331809, 7dc8f786fe,
15b5789f55, 1de6f4f2b6, 42fc4fe7b1, ec6beaa20a, bf0bc37924, 22fe70770a,
de686f8a38
.github/workflows/build-alpine-linux.yml (vendored, 1 line changed)

@@ -96,6 +96,7 @@ jobs:
--with-boot-jdk=${{ steps.bootjdk.outputs.path }}
--with-zlib=system
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&

.github/workflows/build-cross-compile.yml (vendored, 1 line changed)

@@ -179,6 +179,7 @@ jobs:
--openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}
--with-sysroot=sysroot
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-gcc-${{ inputs.gcc-major-version }}
CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-g++-${{ inputs.gcc-major-version }}
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (

.github/workflows/build-linux.yml (vendored, 1 line changed)

@@ -143,6 +143,7 @@ jobs:
--with-gtest=${{ steps.gtest.outputs.path }}
--with-zlib=system
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&

.github/workflows/build-macos.yml (vendored, 1 line changed)

@@ -110,6 +110,7 @@ jobs:
--with-gtest=${{ steps.gtest.outputs.path }}
--with-zlib=system
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&

.github/workflows/build-windows.yml (vendored, 1 line changed)

@@ -134,6 +134,7 @@ jobs:
--with-gtest=${{ steps.gtest.outputs.path }}
--with-msvc-toolset-version=${{ inputs.msvc-toolset-version }}
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&
@@ -185,77 +185,30 @@ endif

ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )

SYMBOLS_EXCLUDE_PATTERN := %.debuginfo %.diz %.map

# There may be files with spaces in the names, so use ShellFindFiles
# explicitly.
# There may be files with spaces in the names, so use ShellFindFiles explicitly.
ALL_JDK_FILES := $(call ShellFindFiles, $(JDK_IMAGE_DIR))
ifneq ($(JDK_IMAGE_DIR), $(JDK_SYMBOLS_IMAGE_DIR))
ALL_JDK_SYMBOLS_FILES := $(call ShellFindFiles, $(JDK_SYMBOLS_IMAGE_DIR))
else
ALL_JDK_SYMBOLS_FILES := $(ALL_JDK_FILES)
endif
ifneq ($(JDK_IMAGE_DIR), $(JDK_DEMOS_IMAGE_DIR))
ALL_JDK_DEMOS_FILES := $(call ShellFindFiles, $(JDK_DEMOS_IMAGE_DIR))
else
ALL_JDK_DEMOS_FILES := $(ALL_JDK_FILES)
endif

# Create special filter rules when dealing with unzipped .dSYM directories on
# macosx
ifeq ($(call isTargetOs, macosx), true)
ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
JDK_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
$(call containing, .dSYM/, $(patsubst $(JDK_IMAGE_DIR)/%, %, \
$(ALL_JDK_SYMBOLS_FILES))))
endif
endif

# Create special filter rules when dealing with debug symbols on windows
ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JDK_SYMBOLS_EXCLUDE_PATTERN := %.pdb
endif
endif

JDK_BUNDLE_FILES := \
$(filter-out \
$(JDK_SYMBOLS_EXCLUDE_PATTERN) \
$(JDK_EXTRA_EXCLUDES) \
$(SYMBOLS_EXCLUDE_PATTERN) \
$(JDK_IMAGE_HOMEDIR)/demo/% \
, \
$(ALL_JDK_FILES) \
)

JDK_SYMBOLS_BUNDLE_FILES := \
$(call FindFiles, $(SYMBOLS_IMAGE_DIR))
JDK_SYMBOLS_BUNDLE_FILES := $(call FindFiles, $(SYMBOLS_IMAGE_DIR))

TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_DEMOS_IMAGE_HOMEDIR)/demo/%, \
$(ALL_JDK_DEMOS_FILES))

ALL_JRE_FILES := $(call ShellFindFiles, $(JRE_IMAGE_DIR))

# Create special filter rules when dealing with unzipped .dSYM directories on
# macosx
ifeq ($(OPENJDK_TARGET_OS), macosx)
ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
JRE_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
$(call containing, .dSYM/, $(patsubst $(JRE_IMAGE_DIR)/%, %, $(ALL_JRE_FILES))))
endif
endif

# Create special filter rules when dealing with debug symbols on windows
ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JRE_SYMBOLS_EXCLUDE_PATTERN := %.pdb
endif
endif

JRE_BUNDLE_FILES := $(filter-out \
$(JRE_SYMBOLS_EXCLUDE_PATTERN) \
$(SYMBOLS_EXCLUDE_PATTERN), \
$(ALL_JRE_FILES))
JRE_BUNDLE_FILES := $(ALL_JRE_FILES)

ifeq ($(MACOSX_CODESIGN_MODE), hardened)
# Macosx release build and code signing available.

@@ -218,10 +218,14 @@ ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.pdb,*.map}'
else
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.map}'
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.map}'
endif
else
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.debuginfo,*.dSYM/**,*.dSYM}'
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.debuginfo,*.dSYM/**,*.dSYM}'
else
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*}'
endif
endif

# Unless we are creating a very large module, use the small tool JVM options

@@ -93,16 +93,19 @@ JAVADOC_DISABLED_DOCLINT_WARNINGS := missing
JAVADOC_DISABLED_DOCLINT_PACKAGES := org.w3c.* javax.smartcardio

# The initial set of options for javadoc
# -XDaccessInternalAPI is a temporary workaround, see 8373909
JAVADOC_OPTIONS := -use -keywords -notimestamp \
-serialwarn -encoding utf-8 -docencoding utf-8 -breakiterator \
-splitIndex --system none -javafx --expand-requires transitive \
--override-methods=summary
--override-methods=summary \
-XDaccessInternalAPI

# The reference options must stay stable to allow for comparisons across the
# development cycle.
REFERENCE_OPTIONS := -XDignore.symbol.file=true -use -keywords -notimestamp \
-serialwarn -encoding utf-8 -breakiterator -splitIndex --system none \
-html5 -javafx --expand-requires transitive
-html5 -javafx --expand-requires transitive \
-XDaccessInternalAPI

# Should we add DRAFT stamps to the generated javadoc?
ifeq ($(VERSION_IS_GA), true)

@@ -316,23 +316,36 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
AC_MSG_CHECKING([if we should add external native debug symbols to the shipped bundles])
AC_ARG_WITH([external-symbols-in-bundles],
[AS_HELP_STRING([--with-external-symbols-in-bundles],
[which type of external native debug symbol information shall be shipped in product bundles (none, public, full)
(e.g. ship full/stripped pdbs on Windows) @<:@none@:>@])])
[which type of external native debug symbol information shall be shipped with bundles/images (none, public, full).
@<:@none in release builds, full otherwise. --with-native-debug-symbols=external/zipped is a prerequisite. public is only supported on Windows@:>@])],
[],
[with_external_symbols_in_bundles=default])

if test "x$with_external_symbols_in_bundles" = x || test "x$with_external_symbols_in_bundles" = xnone ; then
AC_MSG_RESULT([no])
elif test "x$with_external_symbols_in_bundles" = xfull || test "x$with_external_symbols_in_bundles" = xpublic ; then
if test "x$OPENJDK_TARGET_OS" != xwindows ; then
AC_MSG_ERROR([--with-external-symbols-in-bundles currently only works on windows!])
elif test "x$COPY_DEBUG_SYMBOLS" != xtrue ; then
AC_MSG_ERROR([--with-external-symbols-in-bundles only works when --with-native-debug-symbols=external is used!])
elif test "x$with_external_symbols_in_bundles" = xfull ; then
if test "x$COPY_DEBUG_SYMBOLS" != xtrue ; then
AC_MSG_ERROR([--with-external-symbols-in-bundles only works when --with-native-debug-symbols=external/zipped is used!])
elif test "x$with_external_symbols_in_bundles" = xpublic && test "x$OPENJDK_TARGET_OS" != xwindows ; then
AC_MSG_ERROR([--with-external-symbols-in-bundles=public is only supported on Windows!])
fi

if test "x$with_external_symbols_in_bundles" = xfull ; then
AC_MSG_RESULT([full])
SHIP_DEBUG_SYMBOLS=full
else
AC_MSG_RESULT([public])
SHIP_DEBUG_SYMBOLS=public
fi
elif test "x$with_external_symbols_in_bundles" = xdefault ; then
if test "x$DEBUG_LEVEL" = xrelease ; then
AC_MSG_RESULT([no (default)])
elif test "x$COPY_DEBUG_SYMBOLS" = xtrue ; then
AC_MSG_RESULT([full (default)])
SHIP_DEBUG_SYMBOLS=full
else
AC_MSG_RESULT([no (default, native debug symbols are not external/zipped)])
fi
else
AC_MSG_ERROR([$with_external_symbols_in_bundles is an unknown value for --with-external-symbols-in-bundles])
fi

@@ -114,7 +114,7 @@ EscapeDollar = $(subst $$,\$$,$(subst \$$,$$,$(strip $1)))

################################################################################
# This macro works just like EscapeDollar above, but for #.
EscapeHash = $(subst \#,\\\#,$(subst \\\#,\#,$(strip $1)))
EscapeHash = $(subst $(HASH),\$(HASH),$(subst \$(HASH),$(HASH),$(strip $1)))

################################################################################
# This macro translates $ into $$ to protect the string from make itself.

@@ -234,6 +234,9 @@ define SetupLinkerFlags
ifeq ($(call isTargetOs, macosx), true)
$1_EXTRA_LDFLAGS += -Wl,-object_path_lto,$$($1_OBJECT_DIR)/$$($1_NAME)_lto_helper.o
endif
ifeq ($(TOOLCHAIN_TYPE), microsoft)
$1_EXTRA_LDFLAGS += -LTCGOUT:$$($1_OBJECT_DIR)/$$($1_NAME).iobj
endif
endif

$1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
@@ -31,10 +31,9 @@ import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.lang.reflect.Field;

import javax.lang.model.element.Element;
import javax.lang.model.element.PackageElement;
import javax.lang.model.element.TypeElement;

import com.sun.source.doctree.DocTree;
import com.sun.source.doctree.LiteralTree;
@@ -160,9 +159,10 @@ public class JSpec implements Taglet {
if (m.find()) {
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");

String url = String.format("%1$s/../specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
docRoot(elem), idPrefix, chapter, section);
String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section);

sb.append("<a href=\"")
.append(url)
@@ -183,6 +183,22 @@ public class JSpec implements Taglet {
return sb.toString();
}

private static ThreadLocal<String> CURRENT_PATH = null;

private String currentPath() {
if (CURRENT_PATH == null) {
try {
Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter")
.getField("CURRENT_PATH");
@SuppressWarnings("unchecked")
ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
CURRENT_PATH = tl;
} catch (ReflectiveOperationException e) {
throw new RuntimeException("Cannot determine current path", e);
}
}
return CURRENT_PATH.get();
}

private String expand(List<? extends DocTree> trees) {
return (new SimpleDocTreeVisitor<StringBuilder, StringBuilder>() {
@@ -209,34 +225,4 @@ public class JSpec implements Taglet {
}).visit(trees, new StringBuilder()).toString();
}

private String docRoot(Element elem) {
switch (elem.getKind()) {
case MODULE:
return "..";

case PACKAGE:
PackageElement pe = (PackageElement)elem;
String pkgPart = pe.getQualifiedName()
.toString()
.replace('.', '/')
.replaceAll("[^/]+", "..");
return pe.getEnclosingElement() != null
? "../" + pkgPart
: pkgPart;

case CLASS, ENUM, RECORD, INTERFACE, ANNOTATION_TYPE:
TypeElement te = (TypeElement)elem;
return te.getQualifiedName()
.toString()
.replace('.', '/')
.replaceAll("[^/]+", "..");

default:
var enclosing = elem.getEnclosingElement();
if (enclosing == null)
throw new IllegalArgumentException(elem.getKind().toString());
return docRoot(enclosing);
}
}

}
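Both taglet fixes pivot on the same path arithmetic: instead of deriving a prefix from the element kind (the removed docRoot method), the taglet asks the doclet for the path of the page currently being written and replaces every path segment with "..". A minimal sketch, assuming a hypothetical page path (the real value comes from HtmlDocletWriter's per-thread state):

```java
// Sketch (not JDK code): how replaceAll("[^/]+", "..") turns a page
// location into a relative prefix that climbs back out of it.
public class RootParentDemo {
    public static void main(String[] args) {
        // Hypothetical example path; in the doclet this is currentPath().
        String currentPath = "java.base/java/lang/Object.html";
        // Each run of non-'/' characters (directories and the file name
        // alike) collapses to "..", so a page n levels deep yields n
        // ".." segments.
        String rootParent = currentPath.replaceAll("[^/]+", "..");
        System.out.println(rootParent);                          // ../../../..
        System.out.println(rootParent + "/specs/man/java.html");  // link target
    }
}
```

The resulting rootParent prefix feeds both the JSpec format string ("%1$s/specs/...") and ToolGuide's now root-relative BASE_URL of "specs/man".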
@@ -31,10 +31,9 @@ import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.lang.reflect.Field;

import javax.lang.model.element.Element;
import javax.lang.model.element.PackageElement;
import javax.lang.model.element.TypeElement;

import com.sun.source.doctree.DocTree;
import com.sun.source.doctree.UnknownBlockTagTree;
@@ -68,7 +67,7 @@ public class ToolGuide implements Taglet {

static final String TAG_NAME = "toolGuide";

static final String BASE_URL = "../specs/man";
static final String BASE_URL = "specs/man";

static final Pattern TAG_PATTERN = Pattern.compile("(?s)(?<name>[A-Za-z0-9]+)\\s*(?<label>.*)$");

@@ -119,9 +118,10 @@
if (label.isEmpty()) {
label = name;
}
String rootParent = currentPath().replaceAll("[^/]+", "..");

String url = String.format("%s/%s/%s.html",
docRoot(elem), BASE_URL, name);
rootParent, BASE_URL, name);

if (needComma) {
sb.append(",\n");
@@ -142,33 +142,21 @@
return sb.toString();
}

private String docRoot(Element elem) {
switch (elem.getKind()) {
case MODULE:
return "..";
private static ThreadLocal<String> CURRENT_PATH = null;

case PACKAGE:
PackageElement pe = (PackageElement)elem;
String pkgPart = pe.getQualifiedName()
.toString()
.replace('.', '/')
.replaceAll("[^/]+", "..");
return pe.getEnclosingElement() != null
? "../" + pkgPart
: pkgPart;

case CLASS, ENUM, RECORD, INTERFACE, ANNOTATION_TYPE:
TypeElement te = (TypeElement)elem;
return te.getQualifiedName()
.toString()
.replace('.', '/')
.replaceAll("[^/]+", "..");

default:
var enclosing = elem.getEnclosingElement();
if (enclosing == null)
throw new IllegalArgumentException(elem.getKind().toString());
return docRoot(enclosing);
private String currentPath() {
if (CURRENT_PATH == null) {
try {
Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter")
.getField("CURRENT_PATH");
@SuppressWarnings("unchecked")
ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
CURRENT_PATH = tl;
} catch (ReflectiveOperationException e) {
throw new RuntimeException("Cannot determine current path", e);
}
}
return CURRENT_PATH.get();
}

}
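ToolGuide gains the identical currentPath() helper. The reflective lookup is presumably there because these build-time taglets compile against the public taglet API and cannot reference jdk.javadoc internals at compile time. A self-contained sketch of the same cache-the-ThreadLocal pattern, with a stand-in class so it runs outside the doclet (DocletInternals is hypothetical):

```java
import java.lang.reflect.Field;

// Stand-in for jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter,
// which the taglets cannot name at compile time. Hypothetical class.
class DocletInternals {
    public static final ThreadLocal<String> CURRENT_PATH =
            ThreadLocal.withInitial(() -> "java.base/java/lang/Object.html");
}

public class CurrentPathDemo {
    // Cached after the first lookup, exactly as in the taglet change.
    private static ThreadLocal<String> CURRENT_PATH = null;

    static String currentPath() {
        if (CURRENT_PATH == null) {
            try {
                // Look up the public static field once, by name.
                Field f = Class.forName("DocletInternals").getField("CURRENT_PATH");
                @SuppressWarnings("unchecked")
                ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
                CURRENT_PATH = tl;
            } catch (ReflectiveOperationException e) {
                throw new RuntimeException("Cannot determine current path", e);
            }
        }
        return CURRENT_PATH.get();
    }

    public static void main(String[] args) {
        System.out.println(currentPath()); // java.base/java/lang/Object.html
    }
}
```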
@@ -346,8 +346,14 @@ source %{
}

bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations
if (UseSVE == 0) {
// 1. Only SVE requires partial vector operations.
// 2. The vector size in bytes must be smaller than MaxVectorSize.
// 3. Predicated vectors have a mask input, which guarantees that
// out-of-bounds lanes remain inactive.
int length_in_bytes = vt->length_in_bytes();
if (UseSVE == 0 ||
length_in_bytes == MaxVectorSize ||
node->is_predicated_vector()) {
return false;
}

@@ -370,21 +376,22 @@ source %{
return !node->in(1)->is_Con();
case Op_LoadVector:
case Op_StoreVector:
// We use NEON load/store instructions if the vector length is <= 128 bits.
return vt->length_in_bytes() > 16;
case Op_AddReductionVI:
case Op_AddReductionVL:
// We may prefer using NEON instructions rather than SVE partial operations.
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
// For these ops, we prefer using NEON instructions rather than SVE
// predicated instructions for better performance.
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
// instructions rather than SVE partial operations.
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
return vt->element_basic_type() == T_LONG ||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
!VM_Version::use_neon_for_vector(length_in_bytes);
default:
// For other ops whose vector size is smaller than the max vector size, a
// full-sized unpredicated operation does not impact the final vector result.
// For other ops whose vector size is smaller than the max vector
// size, a full-sized unpredicated operation does not impact the
// vector result.
return false;
}
}
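The rewritten guard folds three bail-out conditions into a single early exit. A sketch of the control flow in Java pseudocode (not HotSpot code; useSVE, maxVectorSize and the booleans stand in for the C++ globals and the Node/TypeVect API):

```java
public class PartialOpsGuard {
    enum Op { LOAD_VECTOR, STORE_VECTOR, OTHER }

    // Mirrors the new early-exit logic of vector_needs_partial_operations.
    static boolean vectorNeedsPartialOperations(int useSVE, int lengthInBytes,
                                                int maxVectorSize,
                                                boolean isPredicatedVector,
                                                Op op) {
        // 1. Only SVE requires partial vector operations.
        // 2. A vector already as wide as MaxVectorSize is full-sized.
        // 3. A predicated vector carries a mask, so out-of-bounds lanes
        //    stay inactive without extra handling.
        if (useSVE == 0 || lengthInBytes == maxVectorSize || isPredicatedVector) {
            return false;
        }
        switch (op) {
            case LOAD_VECTOR:
            case STORE_VECTOR:
                // NEON load/store instructions cover vectors of <= 128 bits.
                return lengthInBytes > 16;
            default:
                // A smaller, unpredicated full-width operation is harmless.
                return false;
        }
    }

    public static void main(String[] args) {
        // 24-byte load on a 32-byte (256-bit) SVE machine, unmasked: true.
        System.out.println(vectorNeedsPartialOperations(
                1, 24, 32, false, Op.LOAD_VECTOR));
    }
}
```

The reduction cases elided above follow the same shape: they return true only when NEON is not the better choice for the given vector length.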
@@ -336,8 +336,14 @@ source %{
}

bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations
if (UseSVE == 0) {
// 1. Only SVE requires partial vector operations.
// 2. The vector size in bytes must be smaller than MaxVectorSize.
// 3. Predicated vectors have a mask input, which guarantees that
// out-of-bounds lanes remain inactive.
int length_in_bytes = vt->length_in_bytes();
if (UseSVE == 0 ||
length_in_bytes == MaxVectorSize ||
node->is_predicated_vector()) {
return false;
}

@@ -360,21 +366,22 @@ source %{
return !node->in(1)->is_Con();
case Op_LoadVector:
case Op_StoreVector:
// We use NEON load/store instructions if the vector length is <= 128 bits.
return vt->length_in_bytes() > 16;
case Op_AddReductionVI:
case Op_AddReductionVL:
// We may prefer using NEON instructions rather than SVE partial operations.
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
// For these ops, we prefer using NEON instructions rather than SVE
// predicated instructions for better performance.
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
// instructions rather than SVE partial operations.
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
return vt->element_basic_type() == T_LONG ||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
!VM_Version::use_neon_for_vector(length_in_bytes);
default:
// For other ops whose vector size is smaller than the max vector size, a
// full-sized unpredicated operation does not impact the final vector result.
// For other ops whose vector size is smaller than the max vector
// size, a full-sized unpredicated operation does not impact the
// vector result.
return false;
}
}

@@ -310,7 +310,18 @@ static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registe
__ add(sp, sp, 32 * wordSize);
}

#ifdef R18_RESERVED
/*
Do not modify r18_tls when restoring registers if it is a reserved register. On Windows,
for example, r18_tls is used to store the pointer to the current thread's TEB (where TLS
variables are stored). Therefore, modifying r18_tls would corrupt the TEB pointer.
*/
__ pop(RegSet::range(r0, r17), sp);
__ ldp(zr, r19, Address(__ post(sp, 2 * wordSize)));
__ pop(RegSet::range(r20, r29), sp);
#else
__ pop(RegSet::range(r0, r29), sp);
#endif
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {
@@ -323,8 +334,20 @@ static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_f
__ add(sp, sp, 32 * wordSize);
}

#ifdef R18_RESERVED
/*
Do not modify r18_tls when restoring registers if it is a reserved register. On Windows,
for example, r18_tls is used to store the pointer to the current thread's TEB (where TLS
variables are stored). Therefore, modifying r18_tls would corrupt the TEB pointer.
*/
__ ldp(zr, r1, Address(__ post(sp, 2 * wordSize)));
__ pop(RegSet::range(r2, r17), sp);
__ ldp(zr, r19, Address(__ post(sp, 2 * wordSize)));
__ pop(RegSet::range(r20, r29), sp);
#else
__ ldp(zr, r1, Address(__ post(sp, 16)));
__ pop(RegSet::range(r2, r29), sp);
#endif
}

@@ -5379,7 +5379,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
assert(! Universe::heap()->is_in(k), "should not be an oop");

InstructionMark im(this);
RelocationHolder rspec = metadata_Relocation::spec(index);
@@ -6260,14 +6259,10 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)

// Intrinsic for
//
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-

@@ -6335,8 +6335,36 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

// Special prefetch versions which use the dcbz instruction.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);

format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($src$$Register, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);

format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);

format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@@ -6349,6 +6377,7 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{

instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);

format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}

@@ -2736,14 +2736,10 @@ void C2_MacroAssembler::char_array_compress_v(Register src, Register dst, Regist

// Intrinsic for
//
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied. A successful
// copy will complete with the post-condition: 'res' == 'len', while an

@@ -4933,7 +4933,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
assert(!Universe::heap()->is_in(k), "should not be an oop");

narrowKlass nk = CompressedKlassPointers::encode(k);
relocate(metadata_Relocation::spec(index), [&] {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -5889,7 +5889,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

subptr(count, 16 << shift);
jccb(Assembler::less, L_check_fill_32_bytes);
jcc(Assembler::less, L_check_fill_32_bytes);
align(16);

BIND(L_fill_64_bytes_loop_avx3);
@@ -6054,46 +6054,32 @@ void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src,
}
}

// Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
//
// @IntrinsicCandidate
// int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeISOArray0(
// byte[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeAsciiArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
// byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,
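The new comment block spells out the Java shapes of the intrinsic candidates this stub serves. Collected into one runnable class for reference (the real methods are private to sun.nio.cs and java.lang; these copies only demonstrate the return-value contract):

```java
// Runnable restatement of the encoder loops quoted in the new comment.
public class EncodeLoops {
    // char[] -> ISO-8859-1 bytes; stops at the first char above '\u00FF'.
    static int encodeISOArray(char[] sa, int sp, byte[] da, int dp, int len) {
        int i = 0;
        for (; i < len; i++) {
            char c = sa[sp++];
            if (c > '\u00FF')
                break;
            da[dp++] = (byte) c;
        }
        return i; // number of characters actually copied
    }

    // char[] -> ASCII bytes; the cut-off tightens to '\u0080'.
    static int encodeAsciiArray(char[] sa, int sp, byte[] da, int dp, int len) {
        int i = 0;
        for (; i < len; i++) {
            char c = sa[sp++];
            if (c >= '\u0080')
                break;
            da[dp++] = (byte) c;
        }
        return i;
    }

    public static void main(String[] args) {
        char[] src = {'J', 'D', 'K', '\u00e9', '!'};
        byte[] dst = new byte[src.length];
        // 5: '\u00e9' fits in ISO-8859-1, so every char is copied.
        System.out.println(encodeISOArray(src, 0, dst, 0, src.length));
        // 3: the ASCII loop stops at '\u00e9'.
        System.out.println(encodeAsciiArray(src, 0, dst, 0, src.length));
    }
}
```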
@@ -1,5 +1,5 @@
//
// Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it

@@ -2633,17 +2633,19 @@ bool Matcher::supports_vector_calling_convention(void) {
return EnableVectorSupport;
}

static bool is_ndd_demotable_opr1(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable_opr1) != 0);
}

static bool is_ndd_demotable_opr2(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable_opr2) != 0);
}

#ifdef ASSERT
static bool is_ndd_demotable(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable) != 0);
}

static bool is_ndd_demotable_commutative(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable_commutative) != 0);
}

static bool is_demotion_candidate(const MachNode* mdef) {
return (is_ndd_demotable(mdef) || is_ndd_demotable_commutative(mdef));
return (is_ndd_demotable_opr1(mdef) || is_ndd_demotable_opr2(mdef));
}
#endif

bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
int oper_index) {
@@ -2653,8 +2655,8 @@ bool Matcher::is_register_biasing_candidate(const MachNode* mdef,

if (mdef->num_opnds() <= oper_index || mdef->operand_index(oper_index) < 0 ||
mdef->in(mdef->operand_index(oper_index)) == nullptr) {
assert(oper_index != 1 || !is_demotion_candidate(mdef), "%s", mdef->Name());
assert(oper_index != 2 || !is_ndd_demotable_commutative(mdef), "%s", mdef->Name());
assert(oper_index != 1 || !is_ndd_demotable_opr1(mdef), "%s", mdef->Name());
assert(oper_index != 2 || !is_ndd_demotable_opr2(mdef), "%s", mdef->Name());
return false;
}

@@ -2662,14 +2664,13 @@ bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
// address computation. Biasing def towards any address component will not
// result in NDD demotion by assembler.
if (mdef->operand_num_edges(oper_index) != 1) {
assert(!is_ndd_demotable(mdef), "%s", mdef->Name());
return false;
}

// Demotion candidate must be register mask compatible with definition.
const RegMask& oper_mask = mdef->in_RegMask(mdef->operand_index(oper_index));
if (!oper_mask.overlap(mdef->out_RegMask())) {
assert(!is_demotion_candidate(mdef), "%s", mdef->Name());
assert(!is_ndd_demotable(mdef), "%s", mdef->Name());
return false;
}

@@ -2681,12 +2682,12 @@ bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
// EVEX prefix with shorter REX/REX2 encoding. Demotion candidates
// are decorated with a special flag by instruction selector.
case 1:
return is_demotion_candidate(mdef);
return is_ndd_demotable_opr1(mdef);

// Definition operand of commutative operation can be biased towards second
// operand.
case 2:
return is_ndd_demotable_commutative(mdef);
return is_ndd_demotable_opr2(mdef);

// Current scheme only selects up to two biasing candidates
default:

@@ -2888,9 +2889,9 @@ public:
Flag_clears_zero_flag = Node::_last_flag << 9,
Flag_clears_overflow_flag = Node::_last_flag << 10,
Flag_clears_sign_flag = Node::_last_flag << 11,
Flag_ndd_demotable = Node::_last_flag << 12,
Flag_ndd_demotable_commutative = Node::_last_flag << 13,
_last_flag = Flag_ndd_demotable_commutative
Flag_ndd_demotable_opr1 = Node::_last_flag << 12,
Flag_ndd_demotable_opr2 = Node::_last_flag << 13,
_last_flag = Flag_ndd_demotable_opr2
};
};
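The enum rename is the heart of this change: one orientation-blind pair (demotable, demotable_commutative) becomes per-operand bits, so an instruct whose live input is the second operand can finally say so. A sketch of the mapping (plain Java with hypothetical constants; the real flags are built from Node::_last_flag as shown above, so only the relative bit positions match):

```java
// Sketch (not HotSpot code): the old two-flag scheme expressed in terms
// of the new per-operand flags from x86_64.ad.
public class NddFlagsDemo {
    static final int FLAG_NDD_DEMOTABLE_OPR1 = 1 << 12; // bias def toward operand 1
    static final int FLAG_NDD_DEMOTABLE_OPR2 = 1 << 13; // bias def toward operand 2

    // Old Flag_ndd_demotable meant "operand 1 only".
    static int fromLegacyDemotable() {
        return FLAG_NDD_DEMOTABLE_OPR1;
    }

    // Old Flag_ndd_demotable_commutative meant "either operand works".
    static int fromLegacyCommutative() {
        return FLAG_NDD_DEMOTABLE_OPR1 | FLAG_NDD_DEMOTABLE_OPR2;
    }

    public static void main(String[] args) {
        // negI_rReg_ndd matches (SubI zero src): the live input is the
        // *second* operand, which the old scheme could not express. In the
        // diff below it now sets only the opr2 bit.
        int negFlags = FLAG_NDD_DEMOTABLE_OPR2;
        System.out.println(Integer.toBinaryString(negFlags));
    }
}
```

The instruct-by-instruct changes that follow are exactly this mapping applied mechanically, plus the negI/negL corrections it enables.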
@@ -9872,7 +9873,7 @@ instruct addI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AddI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

format %{ "eaddl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -9900,7 +9901,7 @@ instruct addI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AddI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "eaddl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -9943,7 +9944,7 @@ instruct addI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AddI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(150);
format %{ "eaddl $dst, $src1, $src2\t# int ndd" %}

@@ -10000,7 +10001,7 @@ instruct incI_rReg_ndd(rRegI dst, rRegI src, immI_1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddI src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "eincl $dst, $src\t# int ndd" %}
ins_encode %{

@@ -10055,7 +10056,7 @@ instruct decI_rReg_ndd(rRegI dst, rRegI src, immI_M1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddI src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "edecl $dst, $src\t# int ndd" %}
ins_encode %{

@@ -10162,7 +10163,7 @@ instruct addL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AddL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

format %{ "eaddq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{

@@ -10190,7 +10191,7 @@ instruct addL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AddL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "eaddq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{

@@ -10233,7 +10234,7 @@ instruct addL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AddL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(150);
format %{ "eaddq $dst, $src1, $src2\t# long ndd" %}

@@ -10289,7 +10290,7 @@ instruct incL_rReg_ndd(rRegL dst, rRegI src, immL1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddL src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "eincq $dst, $src\t# long ndd" %}
ins_encode %{

@@ -10344,7 +10345,7 @@ instruct decL_rReg_ndd(rRegL dst, rRegL src, immL_M1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddL src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "edecq $dst, $src\t# long ndd" %}
ins_encode %{

@@ -11059,7 +11060,7 @@ instruct subI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "esubl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -11073,7 +11074,7 @@ instruct subI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "esubl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -11116,7 +11117,7 @@ instruct subI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (SubI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

ins_cost(150);
format %{ "esubl $dst, $src1, $src2\t# int ndd" %}

@@ -11174,7 +11175,7 @@ instruct subL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "esubq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{

@@ -11188,7 +11189,7 @@ instruct subL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (SubL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "esubq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{

@@ -11231,7 +11232,7 @@ instruct subL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (SubL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

ins_cost(150);
format %{ "esubq $dst, $src1, $src2\t# long ndd" %}

@@ -11303,7 +11304,7 @@ instruct negI_rReg_ndd(rRegI dst, rRegI src, immI_0 zero, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubI zero src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr2);

format %{ "enegl $dst, $src\t# int ndd" %}
ins_encode %{

@@ -11331,7 +11332,7 @@ instruct negI_rReg_2_ndd(rRegI dst, rRegI src, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (NegI src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "enegl $dst, $src\t# int ndd" %}
ins_encode %{

@@ -11372,7 +11373,7 @@ instruct negL_rReg_ndd(rRegL dst, rRegL src, immL0 zero, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubL zero src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr2);

format %{ "enegq $dst, $src\t# long ndd" %}
ins_encode %{

@@ -11400,7 +11401,7 @@ instruct negL_rReg_2_ndd(rRegL dst, rRegL src, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (NegL src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);

format %{ "enegq $dst, $src\t# long ndd" %}
ins_encode %{

@@ -11445,7 +11446,7 @@ instruct mulI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (MulI src1 src2));
effect(KILL cr);
flag(PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(300);
format %{ "eimull $dst, $src1, $src2\t# int ndd" %}

@@ -11487,7 +11488,7 @@ instruct mulI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (MulI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(350);
format %{ "eimull $dst, $src1, $src2\t# int ndd" %}

@@ -11539,7 +11540,7 @@ instruct mulL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (MulL src1 src2));
effect(KILL cr);
flag(PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(300);
format %{ "eimulq $dst, $src1, $src2\t# long ndd" %}

@@ -11581,7 +11582,7 @@ instruct mulL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (MulL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(350);
format %{ "eimulq $dst, $src1, $src2 \t# long" %}

@@ -11856,7 +11857,7 @@ instruct salI_rReg_immI2_ndd(rRegI dst, rRegI src, immI2 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "esall $dst, $src, $shift\t# int(ndd)" %}
ins_encode %{

@@ -11885,7 +11886,7 @@ instruct salI_rReg_imm_ndd(rRegI dst, rRegI src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "esall $dst, $src, $shift\t# int (ndd)" %}
ins_encode %{

@@ -11992,7 +11993,7 @@ instruct sarI_rReg_imm_ndd(rRegI dst, rRegI src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (RShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "esarl $dst, $src, $shift\t# int (ndd)" %}
ins_encode %{

@@ -12099,7 +12100,7 @@ instruct shrI_rReg_imm_ndd(rRegI dst, rRegI src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (URShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "eshrl $dst, $src, $shift\t # int (ndd)" %}
ins_encode %{

@@ -12207,7 +12208,7 @@ instruct salL_rReg_immI2_ndd(rRegL dst, rRegL src, immI2 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "esalq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{

@@ -12236,7 +12237,7 @@ instruct salL_rReg_imm_ndd(rRegL dst, rRegL src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "esalq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{

@@ -12343,7 +12344,7 @@ instruct sarL_rReg_imm_ndd(rRegL dst, rRegL src, immI shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (RShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "esarq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{

@@ -12450,7 +12451,7 @@ instruct shrL_rReg_imm_ndd(rRegL dst, rRegL src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (URShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "eshrq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{

@@ -12622,7 +12623,7 @@ instruct rolI_rReg_Var_ndd(rRegI dst, rRegI src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_INT);
match(Set dst (RotateLeft src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "eroll $dst, $src, $shift\t# rotate left (int ndd)" %}
ins_encode %{

@@ -12687,7 +12688,7 @@ instruct rorI_rReg_Var_ndd(rRegI dst, rRegI src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_INT);
match(Set dst (RotateRight src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "erorl $dst, $src, $shift\t# rotate right(int ndd)" %}
ins_encode %{

@@ -12754,7 +12755,7 @@ instruct rolL_rReg_Var_ndd(rRegL dst, rRegL src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_LONG);
match(Set dst (RotateLeft src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "erolq $dst, $src, $shift\t# rotate left(long ndd)" %}
ins_encode %{

@@ -12819,7 +12820,7 @@ instruct rorL_rReg_Var_ndd(rRegL dst, rRegL src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_LONG);
match(Set dst (RotateRight src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "erorq $dst, $src, $shift\t# rotate right(long ndd)" %}
ins_encode %{

@@ -12897,7 +12898,7 @@ instruct andI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AndI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

format %{ "eandl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -12990,7 +12991,7 @@ instruct andI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AndI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);

format %{ "eandl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -13034,7 +13035,7 @@ instruct andI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AndI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(150);
format %{ "eandl $dst, $src1, $src2\t# int ndd" %}

@@ -13234,7 +13235,7 @@ instruct orI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

format %{ "eorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -13263,7 +13264,7 @@ instruct orI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);

format %{ "eorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -13277,7 +13278,7 @@ instruct orI_rReg_imm_rReg_ndd(rRegI dst, immI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);

format %{ "eorl $dst, $src2, $src1\t# int ndd" %}
ins_encode %{

@@ -13321,7 +13322,7 @@ instruct orI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

ins_cost(150);
format %{ "eorl $dst, $src1, $src2\t# int ndd" %}

@@ -13397,7 +13398,7 @@ instruct xorI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (XorI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);

format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -13423,7 +13424,7 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
%{
match(Set dst (XorI src imm));
predicate(UseAPX);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);

format %{ "enotl $dst, $src" %}
ins_encode %{

@@ -13454,7 +13455,7 @@ instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);

format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{

@@ -13500,7 +13501,7 @@ instruct xorI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (XorI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
ins_cost(150);
|
||||
format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
|
||||
@ -13579,7 +13580,7 @@ instruct andL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (AndL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
format %{ "eandq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -13635,7 +13636,7 @@ instruct andL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr
|
||||
predicate(UseAPX);
|
||||
match(Set dst (AndL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
format %{ "eandq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -13679,7 +13680,7 @@ instruct andL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
|
||||
predicate(UseAPX);
|
||||
match(Set dst (AndL src1 (LoadL src2)));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
ins_cost(150);
|
||||
format %{ "eandq $dst, $src1, $src2\t# long ndd" %}
|
||||
@ -13882,7 +13883,7 @@ instruct orL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (OrL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
format %{ "eorq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -13937,7 +13938,7 @@ instruct orL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (OrL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
format %{ "eorq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -13951,7 +13952,7 @@ instruct orL_rReg_imm_rReg_ndd(rRegL dst, immL32 src1, rRegL src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (OrL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
format %{ "eorq $dst, $src2, $src1\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -13996,7 +13997,7 @@ instruct orL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (OrL src1 (LoadL src2)));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
ins_cost(150);
|
||||
format %{ "eorq $dst, $src1, $src2\t# long ndd" %}
|
||||
@ -14075,7 +14076,7 @@ instruct xorL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (XorL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -14101,7 +14102,7 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
|
||||
%{
|
||||
predicate(UseAPX);
|
||||
match(Set dst (XorL src imm));
|
||||
flag(PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
format %{ "enotq $dst, $src" %}
|
||||
ins_encode %{
|
||||
@ -14132,7 +14133,7 @@ instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
|
||||
predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
|
||||
match(Set dst (XorL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
@ -14178,7 +14179,7 @@ instruct xorL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
|
||||
predicate(UseAPX);
|
||||
match(Set dst (XorL src1 (LoadL src2)));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
|
||||
|
||||
ins_cost(150);
|
||||
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
|
||||
@ -16633,7 +16634,7 @@ instruct minI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (MinI src1 src2));
|
||||
effect(DEF dst, USE src1, USE src2);
|
||||
flag(PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
ins_cost(200);
|
||||
expand %{
|
||||
@ -16685,7 +16686,7 @@ instruct maxI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (MaxI src1 src2));
|
||||
effect(DEF dst, USE src1, USE src2);
|
||||
flag(PD::Flag_ndd_demotable);
|
||||
flag(PD::Flag_ndd_demotable_opr1);
|
||||
|
||||
ins_cost(200);
|
||||
expand %{
|
||||
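The hunks above replace the coarse Flag_ndd_demotable / Flag_ndd_demotable_commutative matcher flags with per-operand Flag_ndd_demotable_opr1 / Flag_ndd_demotable_opr2 variants. As a rough standalone illustration of what NDD (new data destination) demotion decides — this is a self-contained model with hypothetical names, not the actual HotSpot matcher code — a demotion pass can shorten an APX three-operand form back to the legacy two-operand encoding when the destination register aliases one of the sources:

#include <cstdio>

// Hypothetical model of NDD demotion (names are illustrative, not HotSpot's).
// An APX NDD instruction "eandl dst, src1, src2" can be demoted to the shorter
// legacy "andl dst, src2" when dst == src1 (operand-1 demotable), or, for a
// commutative operation, when dst == src2 (operand-2 demotable, operands swapped).
enum Demote { kNone, kOpr1, kOpr2 };

static Demote demotion_kind(int dst, int src1, int src2,
                            bool opr1_demotable, bool opr2_demotable) {
  if (opr1_demotable && dst == src1) return kOpr1;  // eop dst,dst,src2 -> op dst,src2
  if (opr2_demotable && dst == src2) return kOpr2;  // eop dst,src1,dst -> op dst,src1
  return kNone;                                     // keep the 3-operand NDD form
}

int main() {
  // r10 = r10 & r11: demotable via operand 1.
  std::printf("%d\n", demotion_kind(10, 10, 11, true, true));  // prints 1 (kOpr1)
  // r10 = r11 & r10: demotable only because AND is commutative (operand 2).
  std::printf("%d\n", demotion_kind(10, 11, 10, true, true));  // prints 2 (kOpr2)
  return 0;
}

Splitting the flag per operand lets non-commutative patterns, such as the immediate forms above, advertise only operand-1 demotion, where the old single "commutative" flag had to stand for both cases at once.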
@ -96,6 +96,7 @@
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "sanitizers/leak.hpp"
#include "services/management.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/defaultStream.hpp"

@ -127,10 +127,8 @@ ResolutionErrorEntry::~ResolutionErrorEntry() {
}

void ResolutionErrorEntry::set_nest_host_error(const char* message) {
// If a message is already set, free it.
if (nest_host_error() != nullptr) {
FREE_C_HEAP_ARRAY(char, _nest_host_error);
}
assert(_nest_host_error == nullptr, "caller should have checked");
assert_lock_strong(SystemDictionary_lock);
_nest_host_error = message;
}


@ -1859,7 +1859,7 @@ Symbol* SystemDictionary::find_resolution_error(const constantPoolHandle& pool,

void SystemDictionary::add_nest_host_error(const constantPoolHandle& pool,
int which,
const char* message) {
const stringStream& message) {
{
MutexLocker ml(Thread::current(), SystemDictionary_lock);
ResolutionErrorEntry* entry = ResolutionErrorTable::find_entry(pool, which);
@ -1868,14 +1868,19 @@ void SystemDictionary::add_nest_host_error(const constantPoolHandle& pool,
// constant pool index. In this case resolution succeeded but there's an error in this nest host
// that we use the table to record.
assert(pool->resolved_klass_at(which) != nullptr, "klass should be resolved if there is no entry");
ResolutionErrorTable::add_entry(pool, which, message);
ResolutionErrorTable::add_entry(pool, which, message.as_string(true /* on C-heap */));
} else {
// An existing entry means we had a true resolution failure (LinkageError) with our nest host, but we
// still want to add the error message for the higher-level access checks to report. We should
// only reach here under the same error condition, so we can ignore the potential race with setting
// the message, and set it again.
assert(entry->nest_host_error() == nullptr || strcmp(entry->nest_host_error(), message) == 0, "should be the same message");
entry->set_nest_host_error(message);
// the message.
const char* nhe = entry->nest_host_error();
if (nhe == nullptr) {
entry->set_nest_host_error(message.as_string(true /* on C-heap */));
} else {
DEBUG_ONLY(const char* msg = message.base();)
assert(strcmp(nhe, msg) == 0, "New message %s, differs from original %s", msg, nhe);
}
}
}
}

@ -280,7 +280,7 @@ public:

// Record a nest host resolution/validation error
static void add_nest_host_error(const constantPoolHandle& pool, int which,
const char* message);
const stringStream& message);
static const char* find_nest_host_error(const constantPoolHandle& pool, int which);

static void add_to_initiating_loader(JavaThread* current, InstanceKlass* k,
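The switch from const char* to const stringStream& moves ownership of the message buffer to the callee, which copies it to the C-heap via as_string(true) only when the message is actually stored. A minimal standalone model of that ownership pattern — plain C++ with invented names, not the HotSpot code itself:

#include <cstring>
#include <sstream>
#include <string>

// Model of the ownership change: the callee copies the formatted message
// only when it decides to keep it, instead of every caller pre-allocating.
static char* stored_error = nullptr;  // stands in for ResolutionErrorEntry::_nest_host_error

static void add_nest_host_error_model(const std::ostringstream& message) {
  if (stored_error == nullptr) {
    const std::string s = message.str();
    stored_error = new char[s.size() + 1];   // copy on first store only
    std::strcpy(stored_error, s.c_str());
  }
  // A duplicate report is ignored; the HotSpot code asserts that the new
  // message matches the original instead of overwriting it.
}

int main() {
  std::ostringstream ss;
  ss << "Type Member is not a nest member of Host";  // illustrative text
  add_nest_host_error_model(ss);
  add_nest_host_error_model(ss);  // duplicate report: no second allocation
  return 0;
}

The stack-allocated stream and its buffer die with the caller's scope; only the copy made on the C-heap survives, which is why set_nest_host_error() no longer needs the freeing path removed above.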
@ -415,18 +415,18 @@ class methodHandle;
\
do_class(java_lang_StringCoding, "java/lang/StringCoding") \
do_intrinsic(_countPositives, java_lang_StringCoding, countPositives_name, countPositives_signature, F_S) \
do_name( countPositives_name, "countPositives0") \
do_name( countPositives_name, "countPositives") \
do_signature(countPositives_signature, "([BII)I") \
\
do_class(sun_nio_cs_iso8859_1_Encoder, "sun/nio/cs/ISO_8859_1$Encoder") \
do_intrinsic(_encodeISOArray, sun_nio_cs_iso8859_1_Encoder, encodeISOArray_name, encodeISOArray_signature, F_S) \
do_name( encodeISOArray_name, "encodeISOArray0") \
do_name( encodeISOArray_name, "implEncodeISOArray") \
do_signature(encodeISOArray_signature, "([CI[BII)I") \
\
do_intrinsic(_encodeByteISOArray, java_lang_StringCoding, encodeISOArray_name, indexOfI_signature, F_S) \
\
do_intrinsic(_encodeAsciiArray, java_lang_StringCoding, encodeAsciiArray_name, encodeISOArray_signature, F_S) \
do_name( encodeAsciiArray_name, "encodeAsciiArray0") \
do_name( encodeAsciiArray_name, "implEncodeAsciiArray") \
\
do_class(java_math_BigInteger, "java/math/BigInteger") \
do_intrinsic(_multiplyToLen, java_math_BigInteger, multiplyToLen_name, multiplyToLen_signature, F_S) \
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -686,7 +686,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
// Only try that if we can actually perform a GC.
if (is_init_completed() && policy()->need_to_start_conc_mark("concurrent humongous allocation",
word_size)) {
try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
}
@ -630,6 +630,14 @@ bool SerialHeap::requires_barriers(stackChunkOop obj) const {

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
// precondition
verify_not_in_native_if_java_thread();

if (!is_in_reserved(p)) {
// If it's not even in reserved.
return false;
}

return _young_gen->is_in(p) || _old_gen->is_in(p);
}

@ -797,3 +805,12 @@ void SerialHeap::gc_epilogue(bool full) {

MetaspaceCounters::update_performance_counters();
};

#ifdef ASSERT
void SerialHeap::verify_not_in_native_if_java_thread() {
if (Thread::current()->is_Java_thread()) {
JavaThread* thread = JavaThread::current();
assert(thread->thread_state() != _thread_in_native, "precondition");
}
}
#endif

@ -111,6 +111,8 @@ private:
void print_tracing_info() const override;
void stop() override {};

static void verify_not_in_native_if_java_thread() NOT_DEBUG_RETURN;

public:
// Returns JNI_OK on success
jint initialize() override;
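verify_not_in_native_if_java_thread() is declared with HotSpot's NOT_DEBUG_RETURN macro: in debug builds the declaration ends in a semicolon and the #ifdef ASSERT definition above is compiled in, while in product builds the macro supplies an empty inline body so the call compiles away entirely. A standalone sketch of the same idiom, using standard NDEBUG in place of HotSpot's ASSERT (names here are invented for illustration):

#include <cassert>

// Debug-only check that costs nothing in release builds: the release variant
// gets an empty inline body, mirroring HotSpot's NOT_DEBUG_RETURN macro.
#ifdef NDEBUG
#define DEBUG_ONLY_RETURN {}
#else
#define DEBUG_ONLY_RETURN ;
#endif

struct Heap {
  static void verify_not_in_native() DEBUG_ONLY_RETURN  // empty body in release

  bool is_in(const void* p) const {
    verify_not_in_native();  // precondition, checked only in debug builds
    return p != nullptr;     // stand-in for the real committed-range test
  }
};

#ifndef NDEBUG
void Heap::verify_not_in_native() {
  // Illustrative check; the real one inspects the current thread's state.
  assert(true && "caller must not be in native state");
}
#endif

int main() {
  Heap h;
  return h.is_in(&h) ? 0 : 1;
}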
@ -37,6 +37,7 @@
#include "utilities/copy.hpp"

size_t ThreadLocalAllocBuffer::_max_size = 0;
int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
unsigned int ThreadLocalAllocBuffer::_target_refills = 0;

ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :
@ -224,6 +225,30 @@ void ThreadLocalAllocBuffer::startup_initialization() {
// abort during VM initialization.
_target_refills = MAX2(_target_refills, 2U);

#ifdef COMPILER2
// If the C2 compiler is present, extra space is needed at the end of
// TLABs, otherwise prefetching instructions generated by the C2
// compiler will fault (due to accessing memory outside of heap).
// The amount of space is the max of the number of lines to
// prefetch for array and for instance allocations. (Extra space must be
// reserved to accommodate both types of allocations.)
//
// Only SPARC-specific BIS instructions are known to fault. (Those
// instructions are generated if AllocatePrefetchStyle==3 and
// AllocatePrefetchInstr==1). To be on the safe side, however,
// extra space is reserved for all combinations of
// AllocatePrefetchStyle and AllocatePrefetchInstr.
//
// If the C2 compiler is not present, no space is reserved.

// +1 for rounding up to next cache line, +1 to be safe
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
_reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
(int)HeapWordSize;
}
#endif

// During jvm startup, the main thread is initialized
// before the heap is initialized. So reinitialize it now.
guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
@ -429,7 +454,8 @@ void ThreadLocalAllocStats::publish() {
}

size_t ThreadLocalAllocBuffer::end_reserve() {
return CollectedHeap::lab_alignment_reserve();
size_t reserve_size = CollectedHeap::lab_alignment_reserve();
return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
}

const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const {
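The reserved tail can be checked with a quick worked example. With illustrative flag values — these are not the defaults of any particular platform — AllocatePrefetchDistance = 192, AllocatePrefetchStepSize = 64, AllocatePrefetchLines = 3 and AllocateInstancePrefetchLines = 1, the computation above yields:

#include <algorithm>
#include <cstdio>

int main() {
  // Illustrative values only; the real numbers come from -XX flags.
  const int AllocatePrefetchDistance      = 192;
  const int AllocatePrefetchStepSize      = 64;
  const int AllocatePrefetchLines         = 3;
  const int AllocateInstancePrefetchLines = 1;
  const int HeapWordSize                  = 8;   // 64-bit heap word

  // Same formula as startup_initialization(): max of the two line counts,
  // +1 to round up to the next cache line, +1 to be safe.
  int lines   = std::max(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
  int reserve = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) / HeapWordSize;
  std::printf("reserve = %d heap words\n", reserve);  // (192 + 64*5)/8 = 64
  return 0;
}

end_reserve() then takes the larger of this value and the LAB alignment reserve, so the prefetch tail only grows the reserve when it exceeds what alignment already demands.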
@ -58,6 +58,7 @@ private:
size_t _allocated_before_last_gc; // total bytes allocated up until the last gc

static size_t _max_size; // maximum size of any TLAB
static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
static unsigned _target_refills; // expected number of refills between GCs

unsigned _number_of_refills;
@ -1394,7 +1394,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
}
if (addr->Opcode() == Op_AddP) {
Node* orig_base = addr->in(AddPNode::Base);
Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::DependencyType::NonFloatingNarrowing);
phase->register_new_node(base, ctrl);
if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) {
// Field access
@ -184,8 +184,8 @@ void ShenandoahAdaptiveHeuristics::record_success_concurrent() {
}
}

void ShenandoahAdaptiveHeuristics::record_success_degenerated() {
ShenandoahHeuristics::record_success_degenerated();
void ShenandoahAdaptiveHeuristics::record_degenerated() {
ShenandoahHeuristics::record_degenerated();
// Adjust both trigger's parameters in the case of a degenerated GC because
// either of them should have triggered earlier to avoid this case.
adjust_margin_of_error(DEGENERATE_PENALTY_SD);

@ -114,7 +114,7 @@ public:

virtual void record_cycle_start() override;
virtual void record_success_concurrent() override;
virtual void record_success_degenerated() override;
virtual void record_degenerated() override;
virtual void record_success_full() override;

virtual bool should_start_gc() override;

@ -243,7 +243,7 @@ void ShenandoahHeuristics::record_success_concurrent() {
adjust_penalty(Concurrent_Adjust);
}

void ShenandoahHeuristics::record_success_degenerated() {
void ShenandoahHeuristics::record_degenerated() {
adjust_penalty(Degenerated_Penalty);
}


@ -218,7 +218,7 @@ public:

virtual void record_success_concurrent();

virtual void record_success_degenerated();
virtual void record_degenerated();

virtual void record_success_full();


@ -722,10 +722,10 @@ void ShenandoahOldHeuristics::record_success_concurrent() {
this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
void ShenandoahOldHeuristics::record_degenerated() {
// Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
clear_triggers();
this->ShenandoahHeuristics::record_success_degenerated();
this->ShenandoahHeuristics::record_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {

@ -190,7 +190,7 @@ public:

void record_success_concurrent() override;

void record_success_degenerated() override;
void record_degenerated() override;

void record_success_full() override;
@ -128,8 +128,8 @@ public:
void write_ref_array(HeapWord* start, size_t count);

private:
template <class T>
inline void arraycopy_marking(T* dst, size_t count);
template <bool IS_GENERATIONAL, class T>
void arraycopy_marking(T* dst, size_t count);
template <class T>
inline void arraycopy_evacuation(T* src, size_t count);
template <class T>

@ -429,7 +429,11 @@ void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) {
// If marking old or young, we must evaluate the SATB barrier. This will be the only
// action if we are not marking old. If we are marking old, we must still evaluate the
// load reference barrier for a young collection.
arraycopy_marking(dst, count);
if (_heap->mode()->is_generational()) {
arraycopy_marking<true>(dst, count);
} else {
arraycopy_marking<false>(dst, count);
}
}

if ((gc_state & ShenandoahHeap::EVACUATION) != 0) {
@ -441,11 +445,12 @@ void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) {
}
}

template <class T>
template <bool IS_GENERATIONAL, class T>
void ShenandoahBarrierSet::arraycopy_marking(T* dst, size_t count) {
assert(_heap->is_concurrent_mark_in_progress(), "only during marking");
if (ShenandoahSATBBarrier) {
if (!_heap->marking_context()->allocated_after_mark_start(reinterpret_cast<HeapWord*>(dst))) {
if (!_heap->marking_context()->allocated_after_mark_start(reinterpret_cast<HeapWord*>(dst)) ||
(IS_GENERATIONAL && _heap->heap_region_containing(dst)->is_old() && _heap->is_concurrent_young_mark_in_progress())) {
arraycopy_work<T, false, false, true>(dst, count);
}
}
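Hoisting is_generational() into a bool template parameter turns a per-call runtime test into two specialized instantiations selected once at the call site, so the non-generational barrier body carries no trace of the extra old-region check. A self-contained sketch of the same dispatch shape — names and stand-in conditions invented for illustration:

#include <cstddef>

// Compile-time variant selection: the generational test inside the loop
// body folds to a constant in each instantiation, so the IS_GENERATIONAL ==
// false copy compiles down to the original, cheaper barrier.
template <bool IS_GENERATIONAL, class T>
void arraycopy_marking_model(T* dst, size_t count) {
  for (size_t i = 0; i < count; i++) {
    bool needs_barrier = (i == 0);                    // stand-in for the SATB test
    if (IS_GENERATIONAL) {
      needs_barrier = needs_barrier || (i % 2 == 0);  // stand-in for the old-gen check
    }
    if (needs_barrier) {
      ++dst[i];  // stand-in for enqueueing dst[i] in the SATB queue
    }
  }
}

void barrier_model(int* dst, size_t count, bool generational) {
  // One runtime branch at the boundary, then fully specialized bodies.
  if (generational) {
    arraycopy_marking_model<true>(dst, count);
  } else {
    arraycopy_marking_model<false>(dst, count);
  }
}

int main() {
  int buf[4] = {0, 1, 2, 3};
  barrier_model(buf, 4, true);
  barrier_model(buf, 4, false);
  return 0;
}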
@ -313,8 +313,12 @@ void ShenandoahDegenGC::op_degenerated() {
policy->record_degenerated(_generation->is_young(), _abbreviated, progress);
if (progress) {
heap->notify_gc_progress();
_generation->heuristics()->record_degenerated();
} else if (!heap->mode()->is_generational() || policy->generational_should_upgrade_degenerated_gc()) {
// Upgrade to full GC, register full-GC impact on heuristics.
op_degenerated_futile();
} else {
_generation->heuristics()->record_degenerated();
}
}
@ -87,6 +87,9 @@ JVM_InternString(JNIEnv *env, jstring str);
/*
 * java.lang.System
 */
JNIEXPORT jboolean JNICALL
JVM_AOTEndRecording(JNIEnv *env);

JNIEXPORT jlong JNICALL
JVM_CurrentTimeMillis(JNIEnv *env, jclass ignored);
@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/service/jfrRecorderService.hpp"
#include "jfr/support/jfrClassDefineEvent.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "jfr/support/jfrResolution.hpp"
@ -43,6 +44,7 @@
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"


bool Jfr::is_enabled() {
return JfrRecorder::is_enabled();
}
@ -153,9 +155,9 @@ void Jfr::on_resolution(const Method* caller, const Method* target, TRAPS) {
}
#endif

void Jfr::on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown, bool halt) {
void Jfr::on_vm_shutdown(bool exception_handler /* false */, bool halt /* false */, bool oom /* false */) {
if (!halt && JfrRecorder::is_recording()) {
JfrEmergencyDump::on_vm_shutdown(emit_old_object_samples, emit_event_shutdown);
JfrEmergencyDump::on_vm_shutdown(exception_handler, oom);
}
}

@ -173,6 +175,12 @@ bool Jfr::on_start_flight_recording_option(const JavaVMOption** option, char* de
return JfrOptionSet::parse_start_flight_recording_option(option, delimiter);
}

void Jfr::on_report_java_out_of_memory() {
if (CrashOnOutOfMemoryError && JfrRecorder::is_recording()) {
JfrRecorderService::emit_leakprofiler_events_on_oom();
}
}

#if INCLUDE_CDS
void Jfr::on_restoration(const Klass* k, JavaThread* jt) {
assert(k != nullptr, "invariant");
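The reworked on_vm_shutdown() flags feed the reason strings that post_events() in jfrEmergencyDump.cpp (further below) attaches to the Shutdown and DumpReason events. A condensed standalone view of that mapping, taken directly from the ternaries in the diff:

#include <cstdio>

// How the (exception_handler, oom) flag pair selects the JFR dump reason,
// mirroring post_events() in jfrEmergencyDump.cpp.
static const char* dump_reason(bool exception_handler, bool oom) {
  if (exception_handler && oom) return "CrashOnOutOfMemoryError";
  if (exception_handler)        return "Crash";
  return "Out of Memory";
}

int main() {
  std::printf("%s\n", dump_reason(true, true));    // fatal error raised by CrashOnOutOfMemoryError
  std::printf("%s\n", dump_reason(true, false));   // ordinary VM crash
  std::printf("%s\n", dump_reason(false, false));  // plain out-of-memory report
  return 0;
}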
@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ class Jfr : AllStatic {
static void on_resolution(const Method* caller, const Method* target, TRAPS);
static void on_java_thread_start(JavaThread* starter, JavaThread* startee);
static void on_set_current_thread(JavaThread* jt, oop thread);
static void on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown, bool halt = false);
static void on_vm_shutdown(bool exception_handler = false, bool halt = false, bool oom = false);
static void on_vm_error_report(outputStream* st);
static bool on_flight_recorder_option(const JavaVMOption** option, char* delimiter);
static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
@ -79,6 +79,7 @@ class Jfr : AllStatic {
static void initialize_main_thread(JavaThread* jt);
static bool has_sample_request(JavaThread* jt);
static void check_and_process_sample_request(JavaThread* jt);
static void on_report_java_out_of_memory();
CDS_ONLY(static void on_restoration(const Klass* k, JavaThread* jt);)
};
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -364,8 +364,7 @@ JVM_ENTRY_NO_ENV(void, jfr_set_force_instrumentation(JNIEnv* env, jclass jvm, jb
JVM_END

NO_TRANSITION(void, jfr_emit_old_object_samples(JNIEnv* env, jclass jvm, jlong cutoff_ticks, jboolean emit_all, jboolean skip_bfs))
JfrRecorderService service;
service.emit_leakprofiler_events(cutoff_ticks, emit_all == JNI_TRUE, skip_bfs == JNI_TRUE);
JfrRecorderService::emit_leakprofiler_events(cutoff_ticks, emit_all == JNI_TRUE, skip_bfs == JNI_TRUE);
NO_TRANSITION_END

JVM_ENTRY_NO_ENV(void, jfr_exclude_thread(JNIEnv* env, jclass jvm, jobject t))
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -232,41 +232,50 @@ void JfrSamplerThread::task_stacktrace(JfrSampleRequestType type, JavaThread** l
|
||||
JavaThread* start = nullptr;
|
||||
elapsedTimer sample_time;
|
||||
sample_time.start();
|
||||
ThreadsListHandle tlh;
|
||||
// Resolve a sample session relative start position index into the thread list array.
|
||||
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
|
||||
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
|
||||
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
|
||||
{
|
||||
/*
|
||||
* Take the Threads_lock for three purposes:
|
||||
*
|
||||
* 1) Avoid sampling right through a safepoint,
|
||||
* which could result in touching oops in case of virtual threads.
|
||||
* 2) Prevent JFR from issuing an epoch rotation while the sampler thread
|
||||
* is actively processing a thread in state native, as both threads are outside the safepoint protocol.
|
||||
* 3) Some operating systems (BSD / Mac) require a process lock when sending a signal with pthread_kill.
|
||||
* Holding the Threads_lock prevents a JavaThread from calling os::create_thread(), which also takes the process lock.
|
||||
* In a sense, we provide a coarse signal mask, so we can always send the resume signal.
|
||||
*/
|
||||
MutexLocker tlock(Threads_lock);
|
||||
ThreadsListHandle tlh;
|
||||
// Resolve a sample session relative start position index into the thread list array.
|
||||
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
|
||||
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
|
||||
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
|
||||
|
||||
while (num_samples < sample_limit) {
|
||||
current = next_thread(tlh.list(), start, current);
|
||||
if (current == nullptr) {
|
||||
break;
|
||||
}
|
||||
if (is_excluded(current)) {
|
||||
continue;
|
||||
}
|
||||
if (start == nullptr) {
|
||||
start = current; // remember the thread where we started to attempt sampling
|
||||
}
|
||||
bool success;
|
||||
if (JAVA_SAMPLE == type) {
|
||||
success = sample_java_thread(current);
|
||||
} else {
|
||||
assert(type == NATIVE_SAMPLE, "invariant");
|
||||
success = sample_native_thread(current);
|
||||
}
|
||||
if (success) {
|
||||
num_samples++;
|
||||
}
|
||||
if (SafepointSynchronize::is_at_safepoint()) {
|
||||
// For _thread_in_native, we cannot get the Threads_lock.
|
||||
// For _thread_in_Java, well, there are none.
|
||||
break;
|
||||
while (num_samples < sample_limit) {
|
||||
current = next_thread(tlh.list(), start, current);
|
||||
if (current == nullptr) {
|
||||
break;
|
||||
}
|
||||
if (is_excluded(current)) {
|
||||
continue;
|
||||
}
|
||||
if (start == nullptr) {
|
||||
start = current; // remember the thread where we started to attempt sampling
|
||||
}
|
||||
bool success;
|
||||
if (JAVA_SAMPLE == type) {
|
||||
success = sample_java_thread(current);
|
||||
} else {
|
||||
assert(type == NATIVE_SAMPLE, "invariant");
|
||||
success = sample_native_thread(current);
|
||||
}
|
||||
if (success) {
|
||||
num_samples++;
|
||||
}
|
||||
}
|
||||
|
||||
*last_thread = current; // remember the thread we last attempted to sample
|
||||
}
|
||||
|
||||
*last_thread = current; // remember the thread we last attempted to sample
|
||||
sample_time.stop();
|
||||
log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
|
||||
sample_time.seconds(), type == JAVA_SAMPLE ? num_samples : 0, type == NATIVE_SAMPLE ? num_samples : 0);
|
||||
@ -297,6 +306,7 @@ class OSThreadSampler : public SuspendedThreadTask {
|
||||
// Sampling a thread in state _thread_in_Java
|
||||
// involves a platform-specific thread suspend and CPU context retrieval.
|
||||
bool JfrSamplerThread::sample_java_thread(JavaThread* jt) {
|
||||
assert_lock_strong(Threads_lock);
|
||||
if (jt->thread_state() != _thread_in_Java) {
|
||||
return false;
|
||||
}
|
||||
@ -328,6 +338,7 @@ static JfrSamplerThread* _sampler_thread = nullptr;
|
||||
// without thread suspension and CPU context retrieval,
|
||||
// if we carefully order the loads of the thread state.
|
||||
bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {
|
||||
assert_lock_strong(Threads_lock);
|
||||
if (jt->thread_state() != _thread_in_native) {
|
||||
return false;
|
||||
}
|
||||
@ -343,24 +354,14 @@ bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {
|
||||
|
||||
SafepointMechanism::arm_local_poll_release(jt);
|
||||
|
||||
// Take the Threads_lock for two purposes:
|
||||
// 1) Avoid sampling through a safepoint which could result
|
||||
// in touching oops in case of virtual threads.
|
||||
// 2) Prevent JFR from issuing an epoch rotation while the sampler thread
|
||||
// is actively processing a thread in native, as both threads are now
|
||||
// outside the safepoint protocol.
|
||||
|
||||
// OrderAccess::fence() as part of acquiring the lock prevents loads from floating up.
|
||||
JfrMutexTryLock threads_lock(Threads_lock);
|
||||
|
||||
if (!threads_lock.acquired() || !jt->has_last_Java_frame()) {
|
||||
// Remove the native sample request and release the potentially waiting thread.
|
||||
JfrSampleMonitor jsm(tl);
|
||||
return false;
|
||||
// Separate the arming of the poll (above) from the reading of JavaThread state (below).
|
||||
if (UseSystemMemoryBarrier) {
|
||||
SystemMemoryBarrier::emit();
|
||||
} else {
|
||||
OrderAccess::fence();
|
||||
}
|
||||
|
||||
if (jt->thread_state() != _thread_in_native) {
|
||||
assert_lock_strong(Threads_lock);
|
||||
if (jt->thread_state() != _thread_in_native || !jt->has_last_Java_frame()) {
|
||||
JfrSampleMonitor jsm(tl);
|
||||
if (jsm.is_waiting()) {
|
||||
// The thread has already returned from native,
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -38,6 +38,8 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
@ -460,15 +462,6 @@ static void release_locks(Thread* thread) {
|
||||
assert(thread != nullptr, "invariant");
|
||||
assert(!thread->is_Java_thread() || JavaThread::cast(thread)->thread_state() == _thread_in_vm, "invariant");
|
||||
|
||||
#ifdef ASSERT
|
||||
Mutex* owned_lock = thread->owned_locks();
|
||||
while (owned_lock != nullptr) {
|
||||
Mutex* next = owned_lock->next();
|
||||
owned_lock->unlock();
|
||||
owned_lock = next;
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
if (Threads_lock->owned_by_self()) {
|
||||
Threads_lock->unlock();
|
||||
}
|
||||
@ -550,17 +543,14 @@ class JavaThreadInVMAndNative : public StackObj {
|
||||
}
|
||||
};
|
||||
|
||||
static void post_events(bool emit_old_object_samples, bool emit_event_shutdown, Thread* thread) {
|
||||
if (emit_old_object_samples) {
|
||||
LeakProfiler::emit_events(max_jlong, false, false);
|
||||
}
|
||||
if (emit_event_shutdown) {
|
||||
static void post_events(bool exception_handler, bool oom, Thread * thread) {
|
||||
if (exception_handler) {
|
||||
EventShutdown e;
|
||||
e.set_reason("VM Error");
|
||||
e.set_reason(oom ? "CrashOnOutOfMemoryError" : "VM Error");
|
||||
e.commit();
|
||||
}
|
||||
EventDumpReason event;
|
||||
event.set_reason(emit_old_object_samples ? "Out of Memory" : "Crash");
|
||||
event.set_reason(exception_handler && oom ? "CrashOnOutOfMemoryError" : exception_handler ? "Crash" : "Out of Memory");
|
||||
event.set_recordingId(-1);
|
||||
event.commit();
|
||||
}
|
||||
@ -594,20 +584,40 @@ static bool guard_reentrancy() {
|
||||
return false;
|
||||
}
|
||||
|
||||
void JfrEmergencyDump::on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown) {
|
||||
void JfrEmergencyDump::on_vm_shutdown(bool exception_handler, bool oom) {
|
||||
if (!guard_reentrancy()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Thread* const thread = Thread::current_or_null_safe();
|
||||
assert(thread != nullptr, "invariant");
|
||||
if (thread->is_Watcher_thread()) {
|
||||
log_info(jfr, system)("The Watcher thread crashed so no jfr emergency dump will be generated.");
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure a JavaThread is _thread_in_vm when we make this call
|
||||
JavaThreadInVMAndNative jtivm(thread);
|
||||
post_events(exception_handler, oom, thread);
|
||||
|
||||
if (thread->is_Watcher_thread()) {
|
||||
// We cannot attempt an emergency dump using the Watcher thread
|
||||
// because we rely on the WatcherThread task "is_error_reported()",
|
||||
// to exit the VM after a hardcoded timeout, should the relatively
|
||||
// risky operation of an emergency dump fail (deadlock, livelock).
|
||||
log_warning(jfr, system)
|
||||
("The Watcher thread crashed so no jfr emergency dump will be generated.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (thread->is_VM_thread()) {
|
||||
const VM_Operation* const operation = VMThread::vm_operation();
|
||||
if (operation != nullptr && operation->type() == VM_Operation::VMOp_JFROldObject) {
|
||||
// We will not be able to issue a rotation because the rotation lock
|
||||
// is held by the JFR Recorder Thread that issued the VM_Operation.
|
||||
log_warning(jfr, system)
|
||||
("The VM Thread crashed as part of emitting leak profiler events so no jfr emergency dump will be generated.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
release_locks(thread);
|
||||
post_events(emit_old_object_samples, emit_event_shutdown, thread);
|
||||
|
||||
// if JavaThread, transition to _thread_in_native to issue a final flushpoint
|
||||
NoHandleMark nhm;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,7 +39,7 @@ class JfrEmergencyDump : AllStatic {
|
||||
static const char* chunk_path(const char* repository_path);
|
||||
static void on_vm_error(const char* repository_path);
|
||||
static void on_vm_error_report(outputStream* st, const char* repository_path);
|
||||
static void on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown);
|
||||
static void on_vm_shutdown(bool exception_handler, bool oom);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_RECORDER_REPOSITORY_JFREMERGENCYDUMP_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,7 +34,8 @@
|
||||
(MSGBIT(MSG_START)) | \
|
||||
(MSGBIT(MSG_CLONE_IN_MEMORY)) | \
|
||||
(MSGBIT(MSG_VM_ERROR)) | \
|
||||
(MSGBIT(MSG_FLUSHPOINT)) \
|
||||
(MSGBIT(MSG_FLUSHPOINT)) | \
|
||||
(MSGBIT(MSG_EMIT_LEAKP_REFCHAINS)) \
|
||||
)
|
||||
|
||||
static JfrPostBox* _instance = nullptr;
|
||||
@ -165,7 +166,7 @@ void JfrPostBox::notify_waiters() {
|
||||
assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_handled_serial is protected by JfrMsg_lock.");
|
||||
// Update made visible on release of JfrMsg_lock via fence instruction in Monitor::IUnlock.
|
||||
++_msg_handled_serial;
|
||||
JfrMsg_lock->notify();
|
||||
JfrMsg_lock->notify_all();
|
||||
}
|
||||
|
||||
// safeguard to ensure no threads are left waiting
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -43,6 +43,7 @@ enum JFR_Msg {
|
||||
MSG_SHUTDOWN,
|
||||
MSG_VM_ERROR,
|
||||
MSG_FLUSHPOINT,
|
||||
MSG_EMIT_LEAKP_REFCHAINS,
|
||||
MSG_NO_OF_MSGS
|
||||
};
|
||||
|
||||
@ -51,23 +52,25 @@ enum JFR_Msg {
|
||||
*
|
||||
* Synchronous messages (posting thread waits for message completion):
|
||||
*
|
||||
* MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
|
||||
* MSG_START(1) ; MSGBIT(MSG_START) == (1 << 0x1) == 0x2
|
||||
* MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
|
||||
* MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
|
||||
* MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
|
||||
* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
|
||||
* MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
|
||||
* MSG_START(1) ; MSGBIT(MSG_START) == (1 << 0x1) == 0x2
|
||||
* MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
|
||||
* MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
|
||||
* MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
|
||||
* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
|
||||
* MSG_EMIT_LEAKP_REFCHAINS (10); MSGBIT(MSG_EMIT_LEAKP_REFCHAINS) == (1 << 0xa) == 0x400
|
||||
*
|
||||
* Asynchronous messages (posting thread returns immediately upon deposit):
|
||||
*
|
||||
* MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
|
||||
* MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
|
||||
* MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
|
||||
* MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
|
||||
* MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
|
||||
* MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
|
||||
* MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
|
||||
* MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
|
||||
*/
|
||||
|
||||
class JfrPostBox : public JfrCHeapObj {
|
||||
friend class JfrRecorder;
|
||||
friend class JfrRecorderService;
|
||||
public:
|
||||
void post(JFR_Msg msg);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -55,6 +55,7 @@
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
// incremented on each flushpoint
|
||||
static u8 flushpoint_id = 0;
|
||||
@ -391,6 +392,7 @@ class JfrSafepointWriteVMOperation : public VM_Operation {
|
||||
JfrRecorderService::JfrRecorderService() :
|
||||
_checkpoint_manager(JfrCheckpointManager::instance()),
|
||||
_chunkwriter(JfrRepository::chunkwriter()),
|
||||
_post_box(JfrPostBox::instance()),
|
||||
_repository(JfrRepository::instance()),
|
||||
_stack_trace_repository(JfrStackTraceRepository::instance()),
|
||||
_storage(JfrStorage::instance()),
|
||||
@ -670,17 +672,173 @@ void JfrRecorderService::evaluate_chunk_size_for_rotation() {
|
||||
JfrChunkRotation::evaluate(_chunkwriter);
|
||||
}
|
||||
|
||||
void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs) {
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(JavaThread::current()));
|
||||
// Take the rotation lock to exclude flush() during event emits. This is because event emit
|
||||
// also creates a number checkpoint events. Those checkpoint events require a future typeset checkpoint
|
||||
// event for completeness, i.e. to be generated before being flushed to a segment.
|
||||
// The upcoming flush() or rotation() after event emit completes this typeset checkpoint
|
||||
// and serializes all event emit checkpoint events to the same segment.
|
||||
JfrRotationLock lock;
|
||||
// Take the rotation lock before the transition.
|
||||
JavaThread* current_thread = JavaThread::current();
|
||||
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));
|
||||
ThreadInVMfromNative transition(current_thread);
|
||||
LeakProfiler::emit_events(cutoff_ticks, emit_all, skip_bfs);
|
||||
// LeakProfiler event serialization support.
|
||||
|
||||
struct JfrLeakProfilerEmitRequest {
|
||||
int64_t cutoff_ticks;
|
||||
bool emit_all;
|
||||
bool skip_bfs;
|
||||
bool oom;
|
||||
};
|
||||
|
||||
typedef GrowableArrayCHeap<JfrLeakProfilerEmitRequest, mtTracing> JfrLeakProfilerEmitRequestQueue;
|
||||
static JfrLeakProfilerEmitRequestQueue* _queue = nullptr;
|
||||
constexpr const static int64_t _no_path_to_gc_roots = 0;
|
||||
static bool _oom_emit_request_posted = false;
|
||||
static bool _oom_emit_request_delivered = false;
|
||||
|
||||
static inline bool exclude_paths_to_gc_roots(int64_t cutoff_ticks) {
|
||||
return cutoff_ticks <= _no_path_to_gc_roots;
|
||||
}
|
||||
|
||||
static void enqueue(const JfrLeakProfilerEmitRequest& request) {
|
||||
assert(JfrRotationLock::is_owner(), "invariant");
|
||||
if (_queue == nullptr) {
|
||||
_queue = new JfrLeakProfilerEmitRequestQueue(4);
|
||||
}
|
||||
assert(_queue != nullptr, "invariant");
|
||||
assert(!_oom_emit_request_posted, "invariant");
|
||||
if (request.oom) {
|
||||
_oom_emit_request_posted = true;
|
||||
}
|
||||
_queue->append(request);
|
||||
}
|
||||
|
||||
static JfrLeakProfilerEmitRequest dequeue() {
|
||||
assert(JfrRotationLock::is_owner(), "invariant");
|
||||
assert(_queue != nullptr, "invariant");
|
||||
assert(_queue->is_nonempty(), "invariant");
|
||||
const JfrLeakProfilerEmitRequest& request = _queue->first();
|
||||
_queue->remove_at(0);
|
||||
return request;
|
||||
}
|
||||
|
||||
// This version of emit excludes path-to-gc-roots, i.e. it skips reference chains.
|
||||
static void emit_leakprofiler_events(bool emit_all, bool skip_bfs, JavaThread* jt) {
|
||||
assert(jt != nullptr, "invariant");
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
|
||||
// Take the rotation lock to exclude flush() during event emits. This is because the event emit operation
|
||||
// also creates a number of checkpoint events. Those checkpoint events require a future typeset checkpoint
|
||||
// event for completeness, i.e., to be generated before being flushed to a segment.
|
||||
// The upcoming flush() or rotation() after event emit completes this typeset checkpoint
|
||||
// and serializes all checkpoint events to the same segment.
|
||||
JfrRotationLock lock;
|
||||
// Take the rotation lock before the thread transition, to avoid blocking safepoints.
|
||||
if (_oom_emit_request_posted) {
|
||||
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
|
||||
// is pending or has already been completed. We are about to crash at any time now.
|
||||
assert(CrashOnOutOfMemoryError, "invariant");
|
||||
return;
|
||||
}
|
||||
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
|
||||
ThreadInVMfromNative transition(jt);
|
||||
// Since we are not requesting path-to-gc-roots, i.e., reference chains, we need not issue a VM_Operation.
|
||||
// Therefore, we can let the requesting thread process the request directly, since it already holds the requisite lock.
|
||||
LeakProfiler::emit_events(_no_path_to_gc_roots, emit_all, skip_bfs);
|
||||
}
|
||||
|
||||
void JfrRecorderService::transition_and_post_leakprofiler_emit_msg(JavaThread* jt) {
|
||||
assert(jt != nullptr, "invariant");
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
|
||||
assert(!JfrRotationLock::is_owner(), "invariant");
|
||||
// Transition to _thread_in_VM and post a synchronous message to the JFR Recorder Thread
|
||||
// for it to process our enqueued request, which includes paths-to-gc-roots, i.e., reference chains.
|
||||
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
|
||||
ThreadInVMfromNative transition(jt);
|
||||
_post_box.post(MSG_EMIT_LEAKP_REFCHAINS);
|
||||
}
|
||||
|
||||
// This version of emit includes path-to-gc-roots, i.e., it includes in the request traversing of reference chains.
|
||||
// Traversing reference chains is performed as part of a VM_Operation, and we initiate it from the JFR Recorder Thread.
|
||||
// Because multiple threads can concurrently report_on_java_out_of_memory(), having them all post a synchronous JFR msg,
|
||||
// they rendezvous at a safepoint in a convenient state, ThreadBlockInVM. This mechanism prevents any thread from racing past
|
||||
// this point and begin executing VMError::report_and_die(), until at least one oom request has been delivered.
|
||||
void JfrRecorderService::emit_leakprofiler_events_paths_to_gc_roots(int64_t cutoff_ticks,
|
||||
bool emit_all,
|
||||
bool skip_bfs,
|
||||
bool oom,
|
||||
JavaThread* jt) {
|
||||
assert(jt != nullptr, "invariant");
|
||||
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
  assert(!exclude_paths_to_gc_roots(cutoff_ticks), "invariant");

  {
    JfrRotationLock lock;
    // Take the rotation lock to read and post a request for the JFR Recorder Thread.
    if (_oom_emit_request_posted) {
      if (!oom) {
        // A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
        // is pending or has already been completed. We are about to crash at any time now.
        assert(CrashOnOutOfMemoryError, "invariant");
        return;
      }
    } else {
      assert(!_oom_emit_request_posted, "invariant");
      JfrLeakProfilerEmitRequest request = { cutoff_ticks, emit_all, skip_bfs, oom };
      enqueue(request);
    }
  }
  JfrRecorderService service;
  service.transition_and_post_leakprofiler_emit_msg(jt);
}

// Leakprofiler serialization request, the jdk.jfr.internal.JVM.emitOldObjectSamples() Java entry point.
void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks,
                                                  bool emit_all,
                                                  bool skip_bfs) {
  JavaThread* const jt = JavaThread::current();
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
  if (exclude_paths_to_gc_roots(cutoff_ticks)) {
    ::emit_leakprofiler_events(emit_all, skip_bfs, jt);
    return;
  }
  emit_leakprofiler_events_paths_to_gc_roots(cutoff_ticks, emit_all, skip_bfs, /* oom */ false, jt);
}

// Leakprofiler serialization request, the report_on_java_out_of_memory VM entry point.
void JfrRecorderService::emit_leakprofiler_events_on_oom() {
  assert(CrashOnOutOfMemoryError, "invariant");
  if (EventOldObjectSample::is_enabled()) {
    JavaThread* const jt = JavaThread::current();
    DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)
    ThreadToNativeFromVM transition(jt);
    emit_leakprofiler_events_paths_to_gc_roots(max_jlong, false, false, /* oom */ true, jt);
  }
}

// The worker routine for the JFR Recorder Thread when processing MSG_EMIT_LEAKP_REFCHAINS messages.
void JfrRecorderService::emit_leakprofiler_events() {
  JavaThread* const jt = JavaThread::current();
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
  // Take the rotation lock before the transition.
  JfrRotationLock lock;
  if (_oom_emit_request_delivered) {
    // A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
    // has already been completed. We are about to crash at any time now.
    assert(_oom_emit_request_posted, "invariant");
    assert(CrashOnOutOfMemoryError, "invariant");
    return;
  }

  assert(_queue->is_nonempty(), "invariant");

  {
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
    ThreadInVMfromNative transition(jt);
    while (_queue->is_nonempty()) {
      const JfrLeakProfilerEmitRequest& request = dequeue();
      LeakProfiler::emit_events(request.cutoff_ticks, request.emit_all, request.skip_bfs);
      if (_oom_emit_request_posted && request.oom) {
        assert(CrashOnOutOfMemoryError, "invariant");
        _oom_emit_request_delivered = true;
        break;
      }
    }
  }

  // If processing involved an out-of-memory request, issue an immediate flush operation.
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
  if (_chunkwriter.is_valid() && _oom_emit_request_delivered) {
    invoke_flush();
  }
}
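The _oom_emit_request_posted / _oom_emit_request_delivered pair above implements a post-and-acknowledge handshake under the rotation lock: the thread that is about to crash posts at most one OOM request, and the recorder thread acknowledges it once it has been served. A minimal standalone sketch of that idea, in plain C++ with invented names (not the actual JFR types):

#include <mutex>
#include <queue>

struct EmitRequest { bool oom; };

std::mutex rotation_lock;            // stands in for JfrRotationLock
std::queue<EmitRequest> requests;    // stands in for the JFR request queue
bool oom_request_posted = false;
bool oom_request_delivered = false;

// Requesting side: post a request unless an OOM emit is already pending.
void post_request(bool oom) {
  std::lock_guard<std::mutex> guard(rotation_lock);
  if (oom_request_posted && !oom) {
    return; // an OOM emit is pending or done; the VM will crash shortly
  }
  requests.push({oom});
  if (oom) {
    oom_request_posted = true;
  }
}

// Worker side: drain requests, acknowledging an OOM request once served.
void drain_requests() {
  std::lock_guard<std::mutex> guard(rotation_lock);
  if (oom_request_delivered) {
    return; // the OOM request was already served
  }
  while (!requests.empty()) {
    EmitRequest r = requests.front();
    requests.pop();
    // ... emit leak profiler events for r here ...
    if (oom_request_posted && r.oom) {
      oom_request_delivered = true;
      break;
    }
  }
}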
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,19 +27,23 @@

#include "jfr/utilities/jfrAllocation.hpp"

class JavaThread;
class JfrCheckpointManager;
class JfrChunkWriter;
class JfrPostBox;
class JfrRepository;
class JfrStackTraceRepository;
class JfrStorage;
class JfrStringPool;

class JfrRecorderService : public StackObj {
  friend class Jfr;
  friend class JfrSafepointClearVMOperation;
  friend class JfrSafepointWriteVMOperation;
 private:
  JfrCheckpointManager& _checkpoint_manager;
  JfrChunkWriter& _chunkwriter;
  JfrPostBox& _post_box;
  JfrRepository& _repository;
  JfrStackTraceRepository& _stack_trace_repository;
  JfrStorage& _storage;
@@ -64,6 +68,14 @@ class JfrRecorderService : public StackObj {
  void invoke_safepoint_write();
  void post_safepoint_write();

+ void transition_and_post_leakprofiler_emit_msg(JavaThread* jt);
+
+ static void emit_leakprofiler_events_on_oom();
+ static void emit_leakprofiler_events_paths_to_gc_roots(int64_t cutoff_ticks,
+                                                        bool emit_all,
+                                                        bool skip_bfs,
+                                                        bool oom,
+                                                        JavaThread* jt);
 public:
  JfrRecorderService();
  void start();
@@ -72,8 +84,12 @@ class JfrRecorderService : public StackObj {
  void flushpoint();
  void process_full_buffers();
  void evaluate_chunk_size_for_rotation();
- void emit_leakprofiler_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs);
+ void emit_leakprofiler_events();

  static bool is_recording();
+ static void emit_leakprofiler_events(int64_t cutoff_ticks,
+                                      bool emit_all,
+                                      bool skip_bfs);
};

#endif // SHARE_JFR_RECORDER_SERVICE_JFRRECORDERSERVICE_HPP
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
#define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)))
#define FLUSHPOINT (msgs & (MSGBIT(MSG_FLUSHPOINT)))
#define PROCESS_FULL_BUFFERS (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)|MSGBIT(MSG_FULLBUFFER)))
+ #define LEAKPROFILER_REFCHAINS (msgs & MSGBIT(MSG_EMIT_LEAKP_REFCHAINS))

  JfrPostBox& post_box = JfrRecorderThreadEntry::post_box();
  log_debug(jfr, system)("Recorder thread STARTED");
@@ -70,6 +71,9 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
      if (PROCESS_FULL_BUFFERS) {
        service.process_full_buffers();
      }
+     if (LEAKPROFILER_REFCHAINS) {
+       service.emit_leakprofiler_events();
+     }
      // Check amount of data written to chunk already
      // if it warrants asking for a new chunk.
      service.evaluate_chunk_size_for_rotation();
@@ -98,5 +102,5 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
#undef ROTATE
#undef FLUSHPOINT
#undef PROCESS_FULL_BUFFERS
- #undef SCAVENGE
+ #undef LEAKPROFILER_REFCHAINS
}
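For readers unfamiliar with the post box pattern used above: each message kind occupies one bit, and the recorder thread tests groups of bits against the pending mask. A self-contained sketch of that mechanism (simplified names, not the actual JfrPostBox API):

#include <cstdio>

enum JfrMsg { MSG_ROTATE, MSG_STOP, MSG_FULLBUFFER, MSG_FLUSHPOINT, MSG_EMIT_LEAKP_REFCHAINS };

#define MSGBIT(e) (1 << (e))

int main() {
  // Pretend two messages were posted since the last wakeup.
  int msgs = MSGBIT(MSG_FULLBUFFER) | MSGBIT(MSG_EMIT_LEAKP_REFCHAINS);
  if (msgs & (MSGBIT(MSG_ROTATE) | MSGBIT(MSG_STOP) | MSGBIT(MSG_FULLBUFFER))) {
    std::printf("process full buffers\n");
  }
  if (msgs & MSGBIT(MSG_EMIT_LEAKP_REFCHAINS)) {
    std::printf("emit leakprofiler reference chains\n");
  }
  return 0;
}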
@@ -125,20 +125,6 @@ static traceid get_source(const InstanceKlass* ik, JavaThread* jt) {
  return source_id;
}

- static traceid get_source(const AOTClassLocation* cl, JavaThread* jt) {
-   assert(cl != nullptr, "invariant");
-   assert(!cl->is_modules_image(), "invariant");
-   const char* const path = cl->path();
-   assert(path != nullptr, "invariant");
-   size_t len = strlen(path);
-   const char* file_type = cl->file_type_string();
-   assert(file_type != nullptr, "invariant");
-   len += strlen(file_type) + 3; // ":/" + null
-   char* const url = NEW_RESOURCE_ARRAY_IN_THREAD(jt, char, len);
-   jio_snprintf(url, len, "%s%s%s", file_type, ":/", path);
-   return JfrSymbolTable::add(url);
- }
-
static inline void send_event(const InstanceKlass* ik, traceid source_id) {
  EventClassDefine event;
  event.set_definedClass(ik);
@@ -172,6 +158,20 @@ void JfrClassDefineEvent::on_creation(const InstanceKlass* ik, const ClassFilePa
}

+ #if INCLUDE_CDS
+ static traceid get_source(const AOTClassLocation* cl, JavaThread* jt) {
+   assert(cl != nullptr, "invariant");
+   assert(!cl->is_modules_image(), "invariant");
+   const char* const path = cl->path();
+   assert(path != nullptr, "invariant");
+   size_t len = strlen(path);
+   const char* file_type = cl->file_type_string();
+   assert(file_type != nullptr, "invariant");
+   len += strlen(file_type) + 3; // ":/" + null
+   char* const url = NEW_RESOURCE_ARRAY_IN_THREAD(jt, char, len);
+   jio_snprintf(url, len, "%s%s%s", file_type, ":/", path);
+   return JfrSymbolTable::add(url);
+ }
+
void JfrClassDefineEvent::on_restoration(const InstanceKlass* ik, JavaThread* jt) {
  assert(ik != nullptr, "invariant");
  assert(ik->trace_id() != 0, "invariant");
@@ -312,12 +312,11 @@ InstanceKlass* InstanceKlass::nest_host(TRAPS) {
        ss.print("Nest host resolution of %s with host %s failed: ",
                 this->external_name(), target_host_class);
        java_lang_Throwable::print(PENDING_EXCEPTION, &ss);
-       const char* msg = ss.as_string(true /* on C-heap */);
        constantPoolHandle cph(THREAD, constants());
-       SystemDictionary::add_nest_host_error(cph, _nest_host_index, msg);
+       SystemDictionary::add_nest_host_error(cph, _nest_host_index, ss);
        CLEAR_PENDING_EXCEPTION;

-       log_trace(class, nestmates)("%s", msg);
+       log_trace(class, nestmates)("%s", ss.base());
      } else {
        // A valid nest-host is an instance class in the current package that lists this
        // class as a nest member. If any of these conditions are not met the class is
@@ -356,10 +355,9 @@ InstanceKlass* InstanceKlass::nest_host(TRAPS) {
                   k->external_name(),
                   k->class_loader_data()->loader_name_and_id(),
                   error);
-         const char* msg = ss.as_string(true /* on C-heap */);
          constantPoolHandle cph(THREAD, constants());
-         SystemDictionary::add_nest_host_error(cph, _nest_host_index, msg);
-         log_trace(class, nestmates)("%s", msg);
+         SystemDictionary::add_nest_host_error(cph, _nest_host_index, ss);
+         log_trace(class, nestmates)("%s", ss.base());
        }
      }
    } else {
@@ -1598,8 +1598,10 @@ static Node* fold_subI_no_underflow_pattern(Node* n, PhaseGVN* phase) {
    Node* x = add2->in(1);
    Node* con2 = add2->in(2);
    if (is_sub_con(con2)) {
+     // The graph could be dying (i.e. x is top) in which case type(x) is not a long.
+     const TypeLong* x_long = phase->type(x)->isa_long();
      // Collapsed graph not equivalent if potential over/underflow -> bailing out (*)
-     if (can_overflow(phase->type(x)->is_long(), con1->get_long() + con2->get_long())) {
+     if (x_long == nullptr || can_overflow(x_long, con1->get_long() + con2->get_long())) {
        return nullptr;
      }
      Node* new_con = phase->transform(new AddLNode(con1, con2));
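The guard added above matters because a dying graph can give x the type TOP, for which is_long() would assert; isa_long() returns nullptr instead. The overflow test itself is plain interval arithmetic; a self-contained sketch of the idea (not HotSpot's actual can_overflow):

#include <climits>
#include <cstdio>

// Can x + con wrap around for some x in [lo, hi]?
bool can_overflow(long lo, long hi, long con) {
  if (con > 0) {
    return hi > LONG_MAX - con; // hi + con would exceed LONG_MAX
  }
  return lo < LONG_MIN - con;   // lo + con would fall below LONG_MIN
}

int main() {
  std::printf("%d\n", can_overflow(0L, LONG_MAX, 1L)); // 1: may overflow
  std::printf("%d\n", can_overflow(0L, 100L, 1L));     // 0: provably safe
  return 0;
}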
@@ -246,6 +246,13 @@ public:

  // Do not match base-ptr edge
  virtual uint match_edge(uint idx) const;

#ifdef ASSERT
  bool address_input_has_same_base() const {
    Node *addp = in(Address);
    return !addp->is_AddP() || addp->in(Base)->is_top() || addp->in(Base) == in(Base);
  }
#endif
};

//------------------------------OrINode----------------------------------------
@@ -675,9 +675,6 @@
  product(bool, PrintIntrinsics, false, DIAGNOSTIC, \
          "prints attempted and successful inlining of intrinsics") \
          \
- develop(bool, VerifyIntrinsicChecks, false, \
-         "Verify in intrinsic that Java level checks work as expected") \
-         \
  develop(bool, StressReflectiveCode, false, \
          "Use inexact types at allocations, etc., to test reflection") \
          \
@@ -697,7 +694,8 @@
          "Print progress during Iterative Global Value Numbering") \
          \
  develop(uint, VerifyIterativeGVN, 0, \
-         "Verify Iterative Global Value Numbering =DCBA, with:" \
+         "Verify Iterative Global Value Numbering =EDCBA, with:" \
+         " E: verify node specific invariants" \
          " D: verify Node::Identity did not miss opportunities" \
          " C: verify Node::Ideal did not miss opportunities" \
          " B: verify that type(n) == n->Value() after IGVN" \
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -418,7 +418,8 @@ bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms)
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
-   assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
+   assert(!cg->is_late_inline() || cg->is_mh_late_inline() || cg->is_virtual_late_inline() ||
+          AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
@@ -22,7 +22,6 @@
 *
 */

- #include "castnode.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
@@ -35,12 +34,22 @@
#include "opto/type.hpp"
#include "utilities/checkedCast.hpp"

const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::FloatingNarrowing(true, true, "floating narrowing dependency"); // not pinned, narrows type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::FloatingNonNarrowing(true, false, "floating non-narrowing dependency"); // not pinned, doesn't narrow type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::NonFloatingNarrowing(false, true, "non-floating narrowing dependency"); // pinned, narrows type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::NonFloatingNonNarrowing(false, false, "non-floating non-narrowing dependency"); // pinned, doesn't narrow type

//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
- if (_dependency == UnconditionalDependency) {
+ if (!_dependency.narrows_type()) {
+   // If this cast doesn't carry a type dependency (i.e. not used for type narrowing), we cannot optimize it.
    return this;
  }

  // This cast node carries a type dependency. We can remove it if:
  // - Its input has a narrower type
  // - There's a dominating cast with same input but narrower type
  Node* dom = dominating_cast(phase, phase);
  if (dom != nullptr) {
    return dom;
@@ -109,7 +118,7 @@ Node* ConstraintCastNode::Ideal(PhaseGVN* phase, bool can_reshape) {
}

uint ConstraintCastNode::hash() const {
- return TypeNode::hash() + (int)_dependency + (_extra_types != nullptr ? _extra_types->hash() : 0);
+ return TypeNode::hash() + _dependency.hash() + (_extra_types != nullptr ? _extra_types->hash() : 0);
}

bool ConstraintCastNode::cmp(const Node &n) const {
@@ -117,7 +126,7 @@ bool ConstraintCastNode::cmp(const Node &n) const {
    return false;
  }
  ConstraintCastNode& cast = (ConstraintCastNode&) n;
- if (cast._dependency != _dependency) {
+ if (!cast._dependency.cmp(_dependency)) {
    return false;
  }
  if (_extra_types == nullptr || cast._extra_types == nullptr) {
@@ -130,7 +139,7 @@ uint ConstraintCastNode::size_of() const {
  return sizeof(*this);
}

- Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, DependencyType dependency, BasicType bt) {
+ Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, const DependencyType& dependency, BasicType bt) {
  switch(bt) {
  case T_INT:
    return new CastIINode(c, n, t, dependency);
@@ -143,9 +152,9 @@ Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type*
}

TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
- if (_dependency == UnconditionalDependency) {
-   return nullptr;
- }
+ // See discussion at definition of ConstraintCastNode::DependencyType: replacing this cast with a dominating one is
+ // not safe if _dependency.narrows_type() is not true.
+ assert(_dependency.narrows_type(), "cast can't be replaced by dominating one");
  Node* val = in(1);
  Node* ctl = in(0);
  int opc = Opcode();
@@ -205,30 +214,21 @@ void ConstraintCastNode::dump_spec(outputStream *st) const {
    st->print(" extra types: ");
    _extra_types->dump_on(st);
  }
- if (_dependency != RegularDependency) {
-   st->print(" %s dependency", _dependency == StrongDependency ? "strong" : "unconditional");
- }
+ st->print(" ");
+ _dependency.dump_on(st);
}
#endif

- const Type* CastIINode::Value(PhaseGVN* phase) const {
-   const Type *res = ConstraintCastNode::Value(phase);
-   if (res == Type::TOP) {
-     return Type::TOP;
-   }
-   assert(res->isa_int(), "res must be int");
-
-   // Similar to ConvI2LNode::Value() for the same reasons
-   // see if we can remove type assertion after loop opts
-   res = widen_type(phase, res, T_INT);
-
-   return res;
- }
+ CastIINode* CastIINode::make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
+   return new CastIINode(in(0), parent, type, dependency, _range_check_dependency, _extra_types);
+ }

- Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type) const {
-   Node* n = clone();
-   n->set_req(1, parent);
-   n->as_ConstraintCast()->set_type(type);
+ CastLLNode* CastLLNode::make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
+   return new CastLLNode(in(0), parent, type, dependency, _extra_types);
+ }
+
+ Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
+   Node* n = make_with(parent, type, dependency);
  Node* existing = igvn->hash_find_insert(n);
  if (existing != nullptr) {
    n->destruct(igvn);
@@ -242,14 +242,13 @@ Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (progress != nullptr) {
    return progress;
  }
- if (can_reshape && !phase->C->post_loop_opts_phase()) {
-   // makes sure we run ::Value to potentially remove type assertion after loop opts
+ if (!phase->C->post_loop_opts_phase()) {
+   // makes sure we run widen_type() to potentially common type assertions after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  if (!_range_check_dependency || phase->C->post_loop_opts_phase()) {
    return optimize_integer_cast(phase, T_INT);
  }
  phase->C->record_for_post_loop_opts_igvn(this);
  return nullptr;
}

@@ -279,9 +278,9 @@ void CastIINode::dump_spec(outputStream* st) const {
#endif

CastIINode* CastIINode::pin_array_access_node() const {
- assert(_dependency == RegularDependency, "already pinned");
+ assert(_dependency.is_floating(), "already pinned");
  if (has_range_check()) {
-   return new CastIINode(in(0), in(1), bottom_type(), StrongDependency, has_range_check());
+   return new CastIINode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), has_range_check());
  }
  return nullptr;
}
@@ -315,16 +314,6 @@ void CastIINode::remove_range_check_cast(Compile* C) {
}

- const Type* CastLLNode::Value(PhaseGVN* phase) const {
-   const Type* res = ConstraintCastNode::Value(phase);
-   if (res == Type::TOP) {
-     return Type::TOP;
-   }
-   assert(res->isa_long(), "res must be long");
-
-   return widen_type(phase, res, T_LONG);
- }
-
bool CastLLNode::is_inner_loop_backedge(ProjNode* proj) {
  if (proj != nullptr) {
    Node* ctrl_use = proj->unique_ctrl_out_or_null();
@@ -392,7 +381,7 @@ Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
    return progress;
  }
  if (!phase->C->post_loop_opts_phase()) {
-   // makes sure we run ::Value to potentially remove type assertion after loop opts
+   // makes sure we run widen_type() to potentially common type assertions after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of
@@ -543,7 +532,7 @@ Node* CastP2XNode::Identity(PhaseGVN* phase) {
  return this;
}

- Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency,
+ Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, const DependencyType& dependency,
                                              const TypeTuple* types) {
  if (type->isa_int()) {
    return new CastIINode(c, in, type, dependency, false, types);
@@ -564,7 +553,7 @@ Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type
  return nullptr;
}

- Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
+ Node* ConstraintCastNode::optimize_integer_cast_of_add(PhaseGVN* phase, BasicType bt) {
  PhaseIterGVN *igvn = phase->is_IterGVN();
  const TypeInteger* this_type = this->type()->isa_integer(bt);
  if (this_type == nullptr) {
@@ -586,8 +575,42 @@ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
  Node* x = z->in(1);
  Node* y = z->in(2);

- Node* cx = find_or_make_integer_cast(igvn, x, rx);
- Node* cy = find_or_make_integer_cast(igvn, y, ry);
+ const TypeInteger* tx = phase->type(x)->is_integer(bt);
+ const TypeInteger* ty = phase->type(y)->is_integer(bt);

  // (Cast (Add x y) tz) is transformed into (Add (Cast x rx) (Cast y ry))
  //
  // tz = [tzlo, tzhi]
  // rx = [rxlo, rxhi]
  // ry = [rylo, ryhi]
  // with type of x, tx = [txlo, txhi]
  // with type of y, ty = [tylo, tyhi]
  //
  // From Compile::push_thru_add():
  // rxlo = max(tzlo - tyhi, txlo)
  // rxhi = min(tzhi - tylo, txhi)
  // rylo = max(tzlo - txhi, tylo)
  // ryhi = min(tzhi - txlo, tyhi)
  //
  // If x is a constant, then txlo = txhi
  // rxlo = txlo, rxhi = txhi
  // The bounds of the type of the Add after transformation then are:
  // rxlo + rylo >= txlo + tzlo - txhi >= tzlo
  // rxhi + ryhi <= txhi + tzhi - txlo <= tzhi
  // The resulting type is not wider than the type of the Cast
  // before transformation
  //
  // If neither x nor y is constant then the type of the resulting
  // Add can be wider than the type of the Cast before
  // transformation.
  // For instance, tx = [0, 10], ty = [0, 10], tz = [0, 10]
  // then rx = [0, 10], ry = [0, 10]
  // and rx + ry = [0, 20] which is wider than tz
  //
  // Same reasoning applies to (Cast (Sub x y) tz)
  const DependencyType& dependency = (!tx->is_con() && !ty->is_con()) ? _dependency.with_non_narrowing() : _dependency;
  Node* cx = find_or_make_integer_cast(igvn, x, rx, dependency);
  Node* cy = find_or_make_integer_cast(igvn, y, ry, dependency);
  if (op == Op_Add(bt)) {
    return AddNode::make(cx, cy, bt);
  } else {
@@ -599,11 +622,26 @@ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
  return nullptr;
}

- const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
-   if (!phase->C->post_loop_opts_phase()) {
+ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
+   Node* res = optimize_integer_cast_of_add(phase, bt);
+   if (res != nullptr) {
+     return res;
+   }
+   const Type* t = Value(phase);
+   if (t != Type::TOP && phase->C->post_loop_opts_phase()) {
+     const Type* bottom_t = bottom_type();
+     const TypeInteger* wide_t = widen_type(phase, bottom_t, bt);
+     if (wide_t != bottom_t) {
+       // Widening the type of the Cast (to allow some commoning) causes the Cast to change how it can be optimized (if
+       // the type of its input is narrower than the Cast's type, we can't remove it without losing the control dependency).
+       return make_with(in(1), wide_t, _dependency.with_non_narrowing());
+     }
+   }
+   return nullptr;
+ }

+ const TypeInteger* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
+   const TypeInteger* this_type = res->is_integer(bt);
  // At VerifyConstraintCasts == 1, we verify the ConstraintCastNodes that are present during code
  // emission. This allows us to detect possible mis-scheduling due to these nodes being pinned at
  // the wrong control nodes.
@@ -612,10 +650,9 @@ const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* re
  // mis-transformations that may happen due to these nodes being pinned at the wrong control
  // nodes.
  if (VerifyConstraintCasts > 1) {
-   return res;
+   return this_type;
  }

- const TypeInteger* this_type = res->is_integer(bt);
  const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt);
  if (in_type != nullptr &&
      (in_type->lo_as_long() != this_type->lo_as_long() ||
@@ -636,5 +673,5 @@ const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* re
                          MIN2(in_type->hi_as_long(), hi1),
                          MAX2((int)in_type->_widen, w1), bt);
  }
- return res;
+ return this_type;
}
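The widening example in the optimize_integer_cast_of_add() comment above can be verified with a few lines of standalone arithmetic. This is a sketch of the quoted push_thru_add() bounds only, not the HotSpot implementation:

#include <algorithm>
#include <cstdio>

struct Range { long lo, hi; };

// r = [max(tz.lo - other.hi, self.lo), min(tz.hi - other.lo, self.hi)]
Range push_thru_add(Range tz, Range self, Range other) {
  return { std::max(tz.lo - other.hi, self.lo), std::min(tz.hi - other.lo, self.hi) };
}

int main() {
  Range tx = {0, 10}, ty = {0, 10}, tz = {0, 10};
  Range rx = push_thru_add(tz, tx, ty); // [0, 10]
  Range ry = push_thru_add(tz, ty, tx); // [0, 10]
  // With neither input constant, the pushed-through Add spans [0, 20], wider than
  // tz = [0, 10] -- which is why the transformed casts are made non-narrowing.
  std::printf("rx=[%ld,%ld] ry=[%ld,%ld] sum=[%ld,%ld]\n",
              rx.lo, rx.hi, ry.lo, ry.hi, rx.lo + ry.lo, rx.hi + ry.hi);
  return 0;
}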
@@ -33,21 +33,119 @@
// cast to a different range
class ConstraintCastNode: public TypeNode {
public:
- enum DependencyType {
-   RegularDependency,      // if cast doesn't improve input type, cast can be removed
-   StrongDependency,       // leave cast in even if _type doesn't improve input type, can be replaced by stricter dominating cast if one exist
-   UnconditionalDependency // leave cast in unconditionally
  // Cast nodes are subject to a few optimizations:
  //
  // 1- if the type carried by the Cast doesn't narrow the type of its input, the cast can be replaced by its input.
  //    Similarly, if a dominating Cast with the same input and a narrower type constraint is found, it can replace the
  //    current cast.
  //
  // 2- if the condition that the Cast is control dependent on is hoisted, the Cast is hoisted as well
  //
  // 1- and 2- are not always applied, depending on what constraints are applied to the Cast: there are cases where 1-
  // and 2- apply, where neither 1- nor 2- apply, and where one or the other applies. This class abstracts away these
  // details.
  //
  // If _narrows_type is true, the cast carries a type dependency: "after" the control the cast is dependent on, its data
  // input is known to have a narrower type (stored in the cast node itself). Optimizations 1- above only apply to cast
  // nodes for which _narrows_type is true.
  // If _floating is true, the cast only depends on a single control: its control input. Otherwise, it is pinned at its
  // current location. Optimizations 2- only apply to cast nodes for which _floating is true.
  // _floating here is similar to Node::depends_only_on_test().
  // The 4 combinations of _narrows_type/_floating true/false all have some use. See below, at the end of this class
  // definition, for examples.
  class DependencyType {
  private:
    const bool _floating;     // Does this Cast depend on its control input or is it pinned?
    const bool _narrows_type; // Does this Cast narrow the type, i.e. if the input type is narrower can it be removed?
    const char* _desc;
    DependencyType(bool depends_on_test, bool narrows_type, const char* desc)
      : _floating(depends_on_test),
        _narrows_type(narrows_type),
        _desc(desc) {
    }
    NONCOPYABLE(DependencyType);

  public:

    bool is_floating() const {
      return _floating;
    }

    bool narrows_type() const {
      return _narrows_type;
    }

    void dump_on(outputStream *st) const {
      st->print("%s", _desc);
    }

    uint hash() const {
      return (_floating ? 1 : 0) + (_narrows_type ? 2 : 0);
    }

    bool cmp(const DependencyType& other) const {
      return _floating == other._floating && _narrows_type == other._narrows_type;
    }

    const DependencyType& with_non_narrowing() const {
      if (_floating) {
        return FloatingNonNarrowing;
      }
      return NonFloatingNonNarrowing;
    }

    const DependencyType& with_pinned_dependency() const {
      if (_narrows_type) {
        return NonFloatingNarrowing;
      }
      return NonFloatingNonNarrowing;
    }

    // All the possible combinations of floating/narrowing with example use cases:

    // Use case example: Range Check CastII
    // Floating: The Cast is only dependent on the single range check. If the range check was ever to be hoisted it
    //           would be safe to let the Cast float to where the range check is hoisted up to.
    // Narrowing: The Cast narrows the type to a positive index. If the input to the Cast is narrower, we can safely
    //            remove the cast because the array access will be safe.
    static const DependencyType FloatingNarrowing;
    // Use case example: Widening Cast nodes' types after loop opts: We want to common Casts with slightly different types.
    // Floating: These Casts only depend on the single control.
    // NonNarrowing: Even when the input type is narrower, we are not removing the Cast. Otherwise, the dependency
    //               on the single control is lost, and an array access could float above its range check because we
    //               just removed the dependency on the range check by removing the Cast. This could lead to an
    //               out-of-bounds access.
    static const DependencyType FloatingNonNarrowing;
    // Use case example: An array access that is no longer dependent on a single range check (e.g. range check smearing).
    // NonFloating: The array access must be pinned below all the checks it depends on. If the check it directly depends
    //              on with a control input is hoisted, we do not hoist the Cast as well. If we allowed the Cast to float,
    //              we risk that the array access ends up above another check it depends on (we cannot model two control
    //              dependencies for a node in the IR). This could lead to an out-of-bounds access.
    // Narrowing: If the Cast does not narrow the input type, then it's safe to remove the cast because the array access
    //            will be safe.
    static const DependencyType NonFloatingNarrowing;
    // Use case example: Sinking nodes out of a loop
    // Non-Floating & Non-Narrowing: We don't want the Cast that forces the node to be out of loop to be removed in any
    //                               case. Otherwise, the sunk node could float back into the loop, undoing the sinking.
    //                               This Cast is only used for pinning without caring about narrowing types.
    static const DependencyType NonFloatingNonNarrowing;

  };

 protected:
- const DependencyType _dependency;
+ const DependencyType& _dependency;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  virtual uint hash() const; // Check the type
- const Type* widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const;
- Node* find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type) const;
+ const TypeInteger* widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const;

  virtual ConstraintCastNode* make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
    ShouldNotReachHere(); // Only implemented for CastII and CastLL
    return nullptr;
  }

  Node* find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type, const DependencyType& dependency) const;

 private:
  // PhiNode::Ideal() transforms a Phi that merges a single uncasted value into a single cast pinned at the region.
  // The types of cast nodes eliminated as a consequence of this transformation are collected and stored here so the
  // type dependencies carried by the cast are known. The cast can then be eliminated if the type of its input is
@@ -55,7 +153,7 @@ public:
  const TypeTuple* _extra_types;

 public:
- ConstraintCastNode(Node* ctrl, Node* n, const Type* t, ConstraintCastNode::DependencyType dependency,
+ ConstraintCastNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency,
                     const TypeTuple* extra_types)
    : TypeNode(t,2), _dependency(dependency), _extra_types(extra_types) {
    init_class_id(Class_ConstraintCast);
@@ -67,18 +165,21 @@ public:
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const = 0;
- virtual bool depends_only_on_test() const { return _dependency == RegularDependency; }
- bool carry_dependency() const { return _dependency != RegularDependency; }
+ bool carry_dependency() const { return !_dependency.cmp(DependencyType::FloatingNarrowing); }
+ // A cast node depends_only_on_test if and only if it is floating
+ virtual bool depends_only_on_test() const { return _dependency.is_floating(); }
+ const DependencyType& dependency() const { return _dependency; }
  TypeNode* dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const;
- static Node* make_cast_for_basic_type(Node* c, Node* n, const Type* t, DependencyType dependency, BasicType bt);
+ static Node* make_cast_for_basic_type(Node* c, Node* n, const Type* t, const DependencyType& dependency, BasicType bt);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

- static Node* make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency,
+ static Node* make_cast_for_type(Node* c, Node* in, const Type* type, const DependencyType& dependency,
                                  const TypeTuple* types);

+ Node* optimize_integer_cast_of_add(PhaseGVN* phase, BasicType bt);
  Node* optimize_integer_cast(PhaseGVN* phase, BasicType bt);

  bool higher_equal_types(PhaseGVN* phase, const Node* other) const;
@@ -102,7 +203,7 @@ class CastIINode: public ConstraintCastNode {
  virtual uint size_of() const;

 public:
- CastIINode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, bool range_check_dependency = false, const TypeTuple* types = nullptr)
+ CastIINode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, bool range_check_dependency = false, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types), _range_check_dependency(range_check_dependency) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CastII);
@@ -110,7 +211,7 @@ class CastIINode: public ConstraintCastNode {
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Identity(PhaseGVN* phase);
- virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  bool has_range_check() const {
#ifdef _LP64
@@ -122,6 +223,7 @@ class CastIINode: public ConstraintCastNode {
  }

  CastIINode* pin_array_access_node() const;
+ CastIINode* make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const;
  void remove_range_check_cast(Compile* C);

#ifndef PRODUCT
@@ -131,14 +233,12 @@ class CastIINode: public ConstraintCastNode {

class CastLLNode: public ConstraintCastNode {
public:
- CastLLNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CastLLNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CastLL);
  }

- virtual const Type* Value(PhaseGVN* phase) const;

  static bool is_inner_loop_backedge(ProjNode* proj);

  static bool cmp_used_at_inner_loop_exit_test(CmpNode* cmp);
@@ -147,11 +247,12 @@ public:
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
+ CastLLNode* make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const;
};

class CastHHNode: public ConstraintCastNode {
public:
- CastHHNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CastHHNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CastHH);
@@ -162,7 +263,7 @@ public:

class CastFFNode: public ConstraintCastNode {
public:
- CastFFNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CastFFNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CastFF);
@@ -173,7 +274,7 @@ public:

class CastDDNode: public ConstraintCastNode {
public:
- CastDDNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CastDDNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CastDD);
@@ -184,7 +285,7 @@ public:

class CastVVNode: public ConstraintCastNode {
public:
- CastVVNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CastVVNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CastVV);
@@ -198,7 +299,7 @@ public:
// cast pointer to pointer (different type)
class CastPPNode: public ConstraintCastNode {
public:
- CastPPNode (Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CastPPNode (Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    init_class_id(Class_CastPP);
  }
@@ -210,7 +311,7 @@ class CastPPNode: public ConstraintCastNode {
// for _checkcast, cast pointer to pointer (different type), without JOIN,
class CheckCastPPNode: public ConstraintCastNode {
public:
- CheckCastPPNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ CheckCastPPNode(Node* ctrl, Node* n, const Type* t, const DependencyType& dependency = DependencyType::FloatingNarrowing, const TypeTuple* types = nullptr)
    : ConstraintCastNode(ctrl, n, t, dependency, types) {
    assert(ctrl != nullptr, "control must be set");
    init_class_id(Class_CheckCastPP);
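The four DependencyType combinations and the two transitions (with_non_narrowing, with_pinned_dependency) form a small lattice. A self-contained mirror of that lattice, for illustration only (the real class lives inside ConstraintCastNode and uses HotSpot types):

#include <cstdio>

struct Dep {
  bool floating;     // depends only on its control input?
  bool narrows_type; // removable when the input type is already narrower?
  const char* desc;
};

const Dep FloatingNarrowing       = { true,  true,  "floating narrowing" };
const Dep FloatingNonNarrowing    = { true,  false, "floating non-narrowing" };
const Dep NonFloatingNarrowing    = { false, true,  "non-floating narrowing" };
const Dep NonFloatingNonNarrowing = { false, false, "non-floating non-narrowing" };

const Dep& with_non_narrowing(const Dep& d) {
  return d.floating ? FloatingNonNarrowing : NonFloatingNonNarrowing;
}

const Dep& with_pinned_dependency(const Dep& d) {
  return d.narrows_type ? NonFloatingNarrowing : NonFloatingNonNarrowing;
}

int main() {
  // A range-check CastII starts floating and narrowing; pinning it for an array
  // access (cf. CastIINode::pin_array_access_node) keeps narrowing, drops floating.
  std::printf("pinned range check: %s\n", with_pinned_dependency(FloatingNarrowing).desc);
  // Widening a cast's type after loop opts keeps it floating but non-narrowing.
  std::printf("widened cast: %s\n", with_non_narrowing(FloatingNarrowing).desc);
  return 0;
}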
@@ -1483,18 +1483,9 @@ Node* PhiNode::Identity(PhaseGVN* phase) {
  Node* phi_reg = region();
  for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
    Node* u = phi_reg->fast_out(i);
-   if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
-       u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
-       u->req() == phi_len) {
-     for (uint j = 1; j < phi_len; j++) {
-       if (in(j) != u->in(j)) {
-         u = nullptr;
-         break;
-       }
-     }
-     if (u != nullptr) {
-       return u;
-     }
+   assert(!u->is_Phi() || u->in(0) == phi_reg, "broken Phi/Region subgraph");
+   if (u->is_Phi() && u->req() == phi_len && can_be_replaced_by(u->as_Phi())) {
+     return u;
    }
  }
}
@@ -2096,6 +2087,20 @@ bool PhiNode::is_split_through_mergemem_terminating() const {
  return true;
}

// Is one of the inputs a Cast that has not been processed by igvn yet?
bool PhiNode::wait_for_cast_input_igvn(const PhaseIterGVN* igvn) const {
  for (uint i = 1, cnt = req(); i < cnt; ++i) {
    Node* n = in(i);
    while (n != nullptr && n->is_ConstraintCast()) {
      if (igvn->_worklist.member(n)) {
        return true;
      }
      n = n->in(1);
    }
  }
  return false;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Must preserve
// the CFG, but we can still strip out dead paths.
@@ -2154,6 +2159,28 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
      // If there is a chance that the region can be optimized out do
      // not add a cast node that we can't remove yet.
      !wait_for_region_igvn(phase)) {
    // If one of the inputs is a cast that has yet to be processed by igvn, delay processing of this node to give the
    // inputs a chance to optimize and possibly end up with identical inputs (casts included).
    // Say we have:
    // (Phi region (Cast#1 c uin) (Cast#2 c uin))
    // and Cast#1 and Cast#2 have not had a chance to common yet.
    // If the unique_input() transformation below proceeds, then PhiNode::Ideal returns:
    // (Cast#3 region uin) (1)
    // If PhiNode::Ideal is delayed until Cast#1 and Cast#2 common, then it returns:
    // (Cast#1 c uin) (2)
    //
    // In (1) the resulting cast is conservatively pinned at a later control and while Cast#3 and Cast#1/Cast#2 still
    // have a chance to common, that requires proving that c dominates region in ConstraintCastNode::dominating_cast(),
    // which may not happen if control flow is too complicated and another pass of loop opts doesn't run. Delaying the
    // transformation here should allow a more optimal result.
    // Beyond the efficiency concern, if the casts are CastPPs, there is a risk of ending up with a chain of AddPs with
    // different base inputs (but a unique uncasted base input). This breaks an invariant in the shape of address
    // subtrees.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (wait_for_cast_input_igvn(igvn)) {
      igvn->_worklist.push(this);
      return nullptr;
    }
    uncasted = true;
    uin = unique_input(phase, true);
  }
@@ -2192,7 +2219,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    if (phi_type->isa_ptr()) {
      const Type* uin_type = phase->type(uin);
      if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
-       cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
+       cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
      } else {
        // Use a CastPP for a cast to not null and a CheckCastPP for
        // a cast to a new klass (and both if both null-ness and
@@ -2202,7 +2229,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
        // null, uin's type must be casted to not null
        if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
            uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
-         cast = new CastPPNode(r, uin, TypePtr::NOTNULL, ConstraintCastNode::StrongDependency, extra_types);
+         cast = new CastPPNode(r, uin, TypePtr::NOTNULL, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
        }

        // If the type of phi and uin, both casted to not null,
@@ -2214,14 +2241,14 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
          cast = phase->transform(cast);
          n = cast;
        }
-       cast = new CheckCastPPNode(r, n, phi_type, ConstraintCastNode::StrongDependency, extra_types);
+       cast = new CheckCastPPNode(r, n, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
      }
      if (cast == nullptr) {
-       cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
+       cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
      }
    }
  } else {
-   cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
+   cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
  }
  assert(cast != nullptr, "cast should be set");
  cast = phase->transform(cast);
@@ -2551,7 +2578,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
        }
        Node* phi = mms.memory();
        assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
-       phi->set_req(i, mms.memory2());
+       phi->set_req_X(i, mms.memory2(), phase);
      }
    }
  }
@@ -2560,7 +2587,9 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    for (MergeMemStream mms(result); mms.next_non_empty(); ) {
      Node* phi = mms.memory();
      for (uint i = 1; i < req(); ++i) {
-       if (phi->in(i) == this) phi->set_req(i, phi);
+       if (phi->in(i) == this) {
+         phi->set_req_X(i, phi, phase);
+       }
      }
    }
  }
@@ -2582,7 +2611,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    Node *ii = in(i);
    Node *new_in = MemNode::optimize_memory_chain(ii, at, nullptr, phase);
    if (ii != new_in ) {
-     set_req(i, new_in);
+     set_req_X(i, new_in, phase);
      progress = this;
    }
  }
@@ -2690,6 +2719,25 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    progress = merge_through_phi(this, phase->is_IterGVN());
  }

  // PhiNode::Identity replaces a non-bottom memory phi with a bottom memory phi with the same inputs, if it exists.
  // If the bottom memory phi's inputs are changed (so it can now replace the non-bottom memory phi) or if it's created
  // only after the non-bottom memory phi is processed by igvn, PhiNode::Identity doesn't run and the transformation
  // doesn't happen.
  // Look for non-bottom Phis that should be transformed and enqueue them for igvn so that PhiNode::Identity executes for
  // them.
  if (can_reshape && type() == Type::MEMORY && adr_type() == TypePtr::BOTTOM) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    uint phi_len = req();
    Node* phi_reg = region();
    for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
      Node* u = phi_reg->fast_out(i);
      assert(!u->is_Phi() || (u->in(0) == phi_reg && u->req() == phi_len), "broken Phi/Region subgraph");
      if (u->is_Phi() && u->as_Phi()->can_be_replaced_by(this)) {
        igvn->_worklist.push(u);
      }
    }
  }

  return progress; // Return any progress
}

@@ -2739,6 +2787,11 @@ const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
  return TypeTuple::make(types.length(), flds);
}

bool PhiNode::can_be_replaced_by(const PhiNode* other) const {
  return type() == Type::MEMORY && other->type() == Type::MEMORY && adr_type() != TypePtr::BOTTOM &&
         other->adr_type() == TypePtr::BOTTOM && has_same_inputs_as(other);
}

Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
  Node_Stack stack(1);
  VectorSet visited;

@@ -182,6 +182,8 @@ class PhiNode : public TypeNode {

  bool is_split_through_mergemem_terminating() const;

  bool wait_for_cast_input_igvn(const PhaseIterGVN* igvn) const;

 public:
  // Node layout (parallels RegionNode):
  enum { Region, // Control input is the Phi's region.
@@ -271,6 +273,7 @@ public:
#endif //ASSERT

  const TypeTuple* collect_types(PhaseGVN* phase) const;
  bool can_be_replaced_by(const PhiNode* other) const;
};

//------------------------------GotoNode---------------------------------------
@@ -3414,10 +3414,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f

    case Op_AddP: { // Assert sane base pointers
      Node *addp = n->in(AddPNode::Address);
-     assert( !addp->is_AddP() ||
-             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
-             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
-             "Base pointers must match (addp %u)", addp->_idx );
+     assert(n->as_AddP()->address_input_has_same_base(), "Base pointers must match (addp %u)", addp->_idx );
#ifdef _LP64
      if ((UseCompressedOops || UseCompressedClassPointers) &&
          addp->Opcode() == Op_ConP &&
@@ -4578,7 +4575,7 @@ Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt*
    // node from floating above the range check during loop optimizations. Otherwise, the
    // ConvI2L node may be eliminated independently of the range check, causing the data path
    // to become TOP while the control path is still there (although it's unreachable).
-   value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
+   value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::DependencyType::NonFloatingNarrowing : ConstraintCastNode::DependencyType::FloatingNarrowing, true /* range check dependency */);
    value = phase->transform(value);
  }
  const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -748,7 +748,7 @@ Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current
  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

- return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
+ return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) {
@@ -1058,6 +1058,39 @@ void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_lo
  // "new_load" might actually be a constant, parameter, etc.
  if (new_load->is_Load()) {
    Node* new_addp = new_load->in(MemNode::Address);

    // If new_load is a Load but not from an AddP, it means that the load is folded into another
    // load. And since this load is not from a field, we cannot create a unique type for it.
    // For example:
    //
    // if (b) {
    //   Holder h1 = new Holder();
    //   Object o = ...;
    //   h1.o = o.getClass();
    // } else {
    //   Holder h2 = ...;
    // }
    // Holder h = Phi(h1, h2);
    // Object r = h.o;
    //
    // Then, splitting r through the merge point results in:
    //
    // if (b) {
    //   Holder h1 = new Holder();
    //   Object o = ...;
    //   h1.o = o.getClass();
    //   Object o1 = h1.o;
    // } else {
    //   Holder h2 = ...;
    //   Object o2 = h2.o;
    // }
    // Object r = Phi(o1, o2);
    //
    // In this case, o1 is folded to o.getClass(), which is a Load but not from an AddP, but from
    // an OopHandle that is loaded from the Klass of o.
    if (!new_addp->is_AddP()) {
      continue;
    }
    Node* base = get_addp_base(new_addp);

    // The base might not be something that we can create a unique
@@ -1235,7 +1268,7 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No
  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
-   nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
+   nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::DependencyType::FloatingNarrowing, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
@@ -1376,7 +1409,7 @@ void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
  }

  if (change) {
-   Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
+   Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::DependencyType::NonFloatingNarrowing, nullptr);
    _igvn->replace_node(out, new_cast);
    _igvn->register_new_node_with_optimizer(new_cast);
  }
@@ -940,11 +940,7 @@ inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
 }
 
 // Emit range checks for the given String.value byte array
-void LibraryCallKit::generate_string_range_check(Node* array,
-                                                 Node* offset,
-                                                 Node* count,
-                                                 bool char_count,
-                                                 bool halt_on_oob) {
+void LibraryCallKit::generate_string_range_check(Node* array, Node* offset, Node* count, bool char_count) {
   if (stopped()) {
     return; // already stopped
   }

@@ -962,17 +958,10 @@ void LibraryCallKit::generate_string_range_check(Node* array,
   generate_limit_guard(offset, count, load_array_length(array), bailout);
 
   if (bailout->req() > 1) {
-    if (halt_on_oob) {
-      bailout = _gvn.transform(bailout)->as_Region();
-      Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
-      Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic"));
-      C->root()->add_req(halt);
-    } else {
-      PreserveJVMState pjvms(this);
-      set_control(_gvn.transform(bailout));
-      uncommon_trap(Deoptimization::Reason_intrinsic,
-                    Deoptimization::Action_maybe_recompile);
-    }
+    PreserveJVMState pjvms(this);
+    set_control(_gvn.transform(bailout));
+    uncommon_trap(Deoptimization::Reason_intrinsic,
+                  Deoptimization::Action_maybe_recompile);
   }
 }
 

@@ -1130,7 +1119,6 @@ bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
 
 
 //------------------------------inline_countPositives------------------------------
-// int java.lang.StringCoding#countPositives0(byte[] ba, int off, int len)
 bool LibraryCallKit::inline_countPositives() {
   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
     return false;

@@ -1142,14 +1130,13 @@ bool LibraryCallKit::inline_countPositives() {
   Node* offset = argument(1);
   Node* len = argument(2);
 
-  if (VerifyIntrinsicChecks) {
-    ba = must_be_not_null(ba, true);
-    generate_string_range_check(ba, offset, len, false, true);
-    if (stopped()) {
-      return true;
-    }
-  }
-
+  ba = must_be_not_null(ba, true);
+
+  // Range checks
+  generate_string_range_check(ba, offset, len, false);
+  if (stopped()) {
+    return true;
+  }
   Node* ba_start = array_element_address(ba, offset, T_BYTE);
   Node* result = new CountPositivesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
   set_result(_gvn.transform(result));

@@ -1183,7 +1170,7 @@ bool LibraryCallKit::inline_preconditions_checkIndex(BasicType bt) {
   jlong upper_bound = _gvn.type(length)->is_integer(bt)->hi_as_long();
   Node* casted_length = ConstraintCastNode::make_cast_for_basic_type(
       control(), length, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
-      ConstraintCastNode::RegularDependency, bt);
+      ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
   casted_length = _gvn.transform(casted_length);
   replace_in_map(length, casted_length);
   length = casted_length;

@@ -1213,7 +1200,7 @@ bool LibraryCallKit::inline_preconditions_checkIndex(BasicType bt) {
   // index is now known to be >= 0 and < length, cast it
   Node* result = ConstraintCastNode::make_cast_for_basic_type(
       control(), index, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
-      ConstraintCastNode::RegularDependency, bt);
+      ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
   result = _gvn.transform(result);
   set_result(result);
   replace_in_map(index, result);
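Note: the Java-level contract that inline_preconditions_checkIndex compiles is the familiar bounds test exposed publicly as java.util.Objects.checkIndex. A minimal sketch of that contract, with the class name and sample values invented for illustration:

    import java.util.Objects;

    public class CheckIndexDemo {
        public static void main(String[] args) {
            int[] a = new int[8];
            // In range: checkIndex returns the index, so it can feed the array access directly.
            a[Objects.checkIndex(3, a.length)] = 42;
            try {
                Objects.checkIndex(9, a.length); // 9 is not in [0, 8)
            } catch (IndexOutOfBoundsException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }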
@@ -6180,9 +6167,6 @@ CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) {
 }
 
 //-------------inline_encodeISOArray-----------------------------------
-// int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
-// int java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
-// int java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
 // encode char[] to byte[] in ISO_8859_1 or ASCII
 bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
   assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");

@@ -6193,14 +6177,8 @@ bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
   Node *dst_offset = argument(3);
   Node *length = argument(4);
 
-  // Cast source & target arrays to not-null
-  if (VerifyIntrinsicChecks) {
-    src = must_be_not_null(src, true);
-    dst = must_be_not_null(dst, true);
-    if (stopped()) {
-      return true;
-    }
-  }
+  src = must_be_not_null(src, true);
+  dst = must_be_not_null(dst, true);
 
   const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
   const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();

@@ -6217,15 +6195,6 @@ bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
     return false;
   }
 
-  // Check source & target bounds
-  if (VerifyIntrinsicChecks) {
-    generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, true);
-    generate_string_range_check(dst, dst_offset, length, false, true);
-    if (stopped()) {
-      return true;
-    }
-  }
-
   Node* src_start = array_element_address(src, src_offset, T_CHAR);
   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
   // 'src_start' points to src array + scaled offset

@@ -163,8 +163,7 @@ class LibraryCallKit : public GraphKit {
                            Node* array_length,
                            RegionNode* region);
   void generate_string_range_check(Node* array, Node* offset,
-                                   Node* length, bool char_count,
-                                   bool halt_on_oob = false);
+                                   Node* length, bool char_count);
   Node* current_thread_helper(Node* &tls_output, ByteSize handle_offset,
                               bool is_immutable);
   Node* generate_current_thread(Node* &tls_output);
@@ -1366,7 +1366,7 @@ Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctr
 // the backedge of the main or post loop is removed, a Div node won't be able to float above the zero trip guard of the
 // loop and can't execute even if the loop is not reached.
 void PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, CountedLoopNode* loop) {
-  Node* castii = new CastIINode(ctrl, incr, TypeInt::INT, ConstraintCastNode::UnconditionalDependency);
+  Node* castii = new CastIINode(ctrl, incr, TypeInt::INT, ConstraintCastNode::DependencyType::NonFloatingNonNarrowing);
   register_new_node(castii, ctrl);
   Node* phi = loop->phi();
   assert(phi->in(LoopNode::EntryControl) == incr, "replacing wrong input?");

@@ -3262,7 +3262,7 @@ bool IdealLoopTree::do_remove_empty_loop(PhaseIdealLoop *phase) {
   Node* cast_ii = ConstraintCastNode::make_cast_for_basic_type(
       cl->in(LoopNode::EntryControl), exact_limit,
       phase->_igvn.type(exact_limit),
-      ConstraintCastNode::UnconditionalDependency, T_INT);
+      ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, T_INT);
   phase->register_new_node(cast_ii, cl->in(LoopNode::EntryControl));
 
   Node* final_iv = new SubINode(cast_ii, cl->stride());

@@ -1001,7 +1001,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
     // a negative stride). We add a CastII here to guarantee that, when the counted loop is created in a subsequent loop
     // opts pass, an accurate range of values for the limits is found.
     const TypeInt* inner_iters_actual_int_range = TypeInt::make(0, iters_limit, Type::WidenMin);
-    inner_iters_actual_int = new CastIINode(outer_head, inner_iters_actual_int, inner_iters_actual_int_range, ConstraintCastNode::UnconditionalDependency);
+    inner_iters_actual_int = new CastIINode(outer_head, inner_iters_actual_int, inner_iters_actual_int_range, ConstraintCastNode::DependencyType::NonFloatingNonNarrowing);
     _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
   } else {
     inner_iters_actual_int = inner_iters_actual;

@@ -1315,7 +1315,7 @@ bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint strid
     register_new_node(bol, iff->in(0));
     new_limit = ConstraintCastNode::make_cast_for_basic_type(new_predicate_proj, new_limit,
                                                              TypeInteger::make(1, iters_limit_long, Type::WidenMin, bt),
-                                                             ConstraintCastNode::UnconditionalDependency, bt);
+                                                             ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, bt);
     register_new_node(new_limit, new_predicate_proj);
 
 #ifndef PRODUCT

@@ -1334,7 +1334,7 @@ bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint strid
     const TypeLong* new_limit_t = new_limit->Value(&_igvn)->is_long();
     new_limit = ConstraintCastNode::make_cast_for_basic_type(predicates.entry(), new_limit,
                                                              TypeLong::make(0, new_limit_t->_hi, new_limit_t->_widen),
-                                                             ConstraintCastNode::UnconditionalDependency, bt);
+                                                             ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, bt);
     register_new_node(new_limit, predicates.entry());
   } else {
     assert(bt == T_INT && known_short_running_loop, "only CountedLoop statically known to be short running");

@@ -1174,7 +1174,7 @@ Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
     if ( nn ) return nn;
   }
 
-  if (n->is_ConstraintCast()) {
+  if (n->is_ConstraintCast() && n->as_ConstraintCast()->dependency().narrows_type()) {
     Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
     // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
     // Node control inputs don't necessarily agree with loop control info (due to

@@ -1708,6 +1708,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
       !n->is_OpaqueInitializedAssertionPredicate() &&
       !n->is_OpaqueTemplateAssertionPredicate() &&
       !is_raw_to_oop_cast && // don't extend live ranges of raw oops
+      n->Opcode() != Op_CreateEx &&
       (KillPathsReachableByDeadTypeNode || !n->is_Type())
      ) {
    Node *n_ctrl = get_ctrl(n);

@@ -1837,7 +1838,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
        if (in != nullptr && ctrl_is_member(n_loop, in)) {
          const Type* in_t = _igvn.type(in);
          cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
-                                                       ConstraintCastNode::UnconditionalDependency, nullptr);
+                                                       ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr);
        }
        if (cast != nullptr) {
          Node* prev = _igvn.hash_find_insert(cast);
@@ -1914,7 +1914,8 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
     transform_later(cache_adr);
     cache_adr = new CastP2XNode(needgc_false, cache_adr);
     transform_later(cache_adr);
-    // Address is aligned to execute prefetch to the beginning of cache line size.
+    // Address is aligned to execute prefetch to the beginning of cache line size
+    // (it is important when BIS instruction is used on SPARC as prefetch).
     Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
     cache_adr = new AndXNode(cache_adr, mask);
     transform_later(cache_adr);

@@ -233,7 +233,7 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode
   Node* inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
   Node* stub_block = *ctrl;
 
-  Node* casted_length = new CastLLNode(inline_block, length, inline_range, ConstraintCastNode::RegularDependency);
+  Node* casted_length = new CastLLNode(inline_block, length, inline_range, ConstraintCastNode::DependencyType::FloatingNarrowing);
   transform_later(casted_length);
   Node* mask_gen = VectorMaskGenNode::make(casted_length, type);
   transform_later(mask_gen);
@@ -329,6 +329,10 @@ public:
 
   static bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);
 
+  // Determines if a vector operation needs to be partially implemented with a mask
+  // ensuring that only the lanes in range [0, vector_length) are processed. This applies
+  // to operations whose vector length is less than the hardware-supported maximum
+  // vector length. Returns true if the operation requires masking, false otherwise.
   static bool vector_needs_partial_operations(Node* node, const TypeVect* vt);
 
   static bool vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen);
@@ -2875,16 +2875,9 @@ Node* Node::find_similar(int opc) {
       Node* use = def->fast_out(i);
       if (use != this &&
           use->Opcode() == opc &&
-          use->req() == req()) {
-        uint j;
-        for (j = 0; j < use->req(); j++) {
-          if (use->in(j) != in(j)) {
-            break;
-          }
-        }
-        if (j == use->req()) {
-          return use;
-        }
+          use->req() == req() &&
+          has_same_inputs_as(use)) {
+        return use;
       }
     }
   }

@@ -2892,6 +2885,16 @@ Node* Node::find_similar(int opc) {
   return nullptr;
 }
 
+bool Node::has_same_inputs_as(const Node* other) const {
+  assert(req() == other->req(), "should have same number of inputs");
+  for (uint j = 0; j < other->req(); j++) {
+    if (in(j) != other->in(j)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 Node* Node::unique_multiple_edges_out_or_null() const {
   Node* use = nullptr;
   for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) {

@@ -1179,6 +1179,7 @@ public:
   // Return a node with opcode "opc" and same inputs as "this" if one can
   // be found; Otherwise return null;
   Node* find_similar(int opc);
+  bool has_same_inputs_as(const Node* other) const;
 
   // Return the unique control out if only one. Null if none or more than one.
   Node* unique_ctrl_out_or_null() const;
@@ -1076,7 +1076,8 @@ void PhaseIterGVN::verify_optimize() {
 
   if (is_verify_Value() ||
       is_verify_Ideal() ||
-      is_verify_Identity()) {
+      is_verify_Identity() ||
+      is_verify_invariants()) {
     ResourceMark rm;
     Unique_Node_List worklist;
     bool failure = false;

@@ -1088,6 +1089,7 @@ void PhaseIterGVN::verify_optimize() {
       if (is_verify_Ideal()) { failure |= verify_Ideal_for(n, false); }
       if (is_verify_Ideal()) { failure |= verify_Ideal_for(n, true); }
       if (is_verify_Identity()) { failure |= verify_Identity_for(n); }
+      if (is_verify_invariants()) { failure |= verify_node_invariants_for(n); }
       // traverse all inputs and outputs
       for (uint i = 0; i < n->req(); i++) {
         if (n->in(i) != nullptr) {

@@ -1102,7 +1104,7 @@ void PhaseIterGVN::verify_optimize() {
     // We should either make sure that these nodes are properly added back to the IGVN worklist
     // in PhaseIterGVN::add_users_to_worklist to update them again or add an exception
     // in the verification code above if that is not possible for some reason (like Load nodes).
-    assert(!failure, "Missed optimization opportunity in PhaseIterGVN");
+    assert(!failure, "Missed optimization opportunity/broken graph in PhaseIterGVN");
   }
 
   verify_empty_worklist(nullptr);

@@ -2058,6 +2060,21 @@ bool PhaseIterGVN::verify_Identity_for(Node* n) {
   tty->print_cr("%s", ss.as_string());
   return true;
 }
+
+// Some other verifications that are not specific to a particular transformation.
+bool PhaseIterGVN::verify_node_invariants_for(const Node* n) {
+  if (n->is_AddP()) {
+    if (!n->as_AddP()->address_input_has_same_base()) {
+      stringStream ss; // Print as a block without tty lock.
+      ss.cr();
+      ss.print_cr("Base pointers must match for AddP chain:");
+      n->dump_bfs(2, nullptr, "", &ss);
+      tty->print_cr("%s", ss.as_string());
+      return true;
+    }
+  }
+  return false;
+}
 #endif
 
 /**

@@ -493,6 +493,7 @@ public:
   bool verify_Value_for(Node* n, bool strict = false);
   bool verify_Ideal_for(Node* n, bool can_reshape);
   bool verify_Identity_for(Node* n);
+  bool verify_node_invariants_for(const Node* n);
   void verify_empty_worklist(Node* n);
 #endif
 

@@ -616,6 +617,10 @@ public:
     // '-XX:VerifyIterativeGVN=1000'
     return ((VerifyIterativeGVN % 10000) / 1000) == 1;
   }
+  static bool is_verify_invariants() {
+    // '-XX:VerifyIterativeGVN=10000'
+    return ((VerifyIterativeGVN % 100000) / 10000) == 1;
+  }
 protected:
   // Sub-quadratic implementation of '-XX:VerifyIterativeGVN=1' (Use-Def verification).
   julong _verify_counter;
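Note: VerifyIterativeGVN packs independent verification passes into separate decimal digits, so the new invariants pass is toggled by the 10000 digit without disturbing the existing modes. A small sketch of that digit decoding in Java (class name and sample flag value are invented):

    public class VerifyFlagDigits {
        // True if the decimal digit with the given value (1, 10, 100, ...) is 1.
        static boolean digitEnabled(long flag, long digitValue) {
            return (flag / digitValue) % 10 == 1;
        }

        public static void main(String[] args) {
            long flag = 10110; // sample: the 10, 100, and 10000 digits are set
            System.out.println("digit 10:    " + digitEnabled(flag, 10));     // true
            System.out.println("digit 1000:  " + digitEnabled(flag, 1_000));  // false
            System.out.println("digit 10000: " + digitEnabled(flag, 10_000)); // true (node invariants)
        }
    }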
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -2482,6 +2482,11 @@ static bool can_subword_truncate(Node* in, const Type* type) {
     return false;
   }
 
+  // Since casts specifically change the type of a node, stay on the safe side and do not truncate them.
+  if (in->is_ConstraintCast()) {
+    return false;
+  }
+
   // Cannot be truncated:
   switch (opc) {
     case Op_AbsI:
@@ -187,7 +187,10 @@ VStatus VLoopAnalyzer::setup_submodules_helper() {
     return body_status;
   }
 
-  _memory_slices.find_memory_slices();
+  VStatus slices_status = _memory_slices.find_memory_slices();
+  if (!slices_status.is_success()) {
+    return slices_status;
+  }
 
   // If there is no memory slice detected, it means there is no store.
   // If there is no reduction and no store, then we give up, because

@@ -207,9 +210,11 @@ VStatus VLoopAnalyzer::setup_submodules_helper() {
 }
 
 // There are 2 kinds of slices:
-// - No memory phi: only loads. All have the same input memory state from before the loop.
+// - No memory phi: only loads.
+//   - Usually, all loads have the same input memory state from before the loop.
+//   - Only rarely this is not the case, and we just bail out for now.
 // - With memory phi. Chain of memory operations inside the loop.
-void VLoopMemorySlices::find_memory_slices() {
+VStatus VLoopMemorySlices::find_memory_slices() {
   Compile* C = _vloop.phase()->C;
   // We iterate over the body, which is topologically sorted. Hence, if there is a phi
   // in a slice, we will find it first, and the loads and stores afterwards.

@@ -228,8 +233,15 @@ void VLoopMemorySlices::find_memory_slices() {
       PhiNode* head = _heads.at(alias_idx);
       if (head == nullptr) {
         // We did not find a phi on this slice yet -> must be a slice with only loads.
-        assert(_inputs.at(alias_idx) == nullptr || _inputs.at(alias_idx) == load->in(1),
-               "not yet touched or the same input");
+        // For now, we can only handle slices with a single memory input before the loop,
+        // so if we find multiple, we bail out of auto vectorization. If this becomes
+        // too restrictive in the future, we could consider tracking multiple inputs.
+        // Different memory inputs can for example happen if one load has its memory state
+        // optimized, and the other load fails to have it optimized, for example because
+        // it does not end up on the IGVN worklist any more.
+        if (_inputs.at(alias_idx) != nullptr && _inputs.at(alias_idx) != load->in(1)) {
+          return VStatus::make_failure(FAILURE_DIFFERENT_MEMORY_INPUT);
+        }
         _inputs.at_put(alias_idx, load->in(1));
       } // else: the load belongs to a slice with a phi that already set heads and inputs.
 #ifdef ASSERT

@@ -243,6 +255,7 @@ void VLoopMemorySlices::find_memory_slices() {
     }
   }
   NOT_PRODUCT( if (_vloop.is_trace_memory_slices()) { print(); } )
+  return VStatus::make_success();
 }
 
 #ifndef PRODUCT
@@ -1060,6 +1073,29 @@ bool VPointer::can_make_speculative_aliasing_check_with(const VPointer& other) c
     return false;
   }
 
+  // The speculative check also needs to create the pointer expressions for both
+  // VPointers. We must check that we can do that, i.e. that all variables of the
+  // VPointers are available at the speculative check (and not just pre-loop invariant).
+  if (!this->can_make_pointer_expression_at_speculative_check()) {
+#ifdef ASSERT
+    if (_vloop.is_trace_speculative_aliasing_analysis()) {
+      tty->print_cr("VPointer::can_make_speculative_aliasing_check_with: not all variables of VPointer are available at speculative check!");
+      this->print_on(tty);
+    }
+#endif
+    return false;
+  }
+
+  if (!other.can_make_pointer_expression_at_speculative_check()) {
+#ifdef ASSERT
+    if (_vloop.is_trace_speculative_aliasing_analysis()) {
+      tty->print_cr("VPointer::can_make_speculative_aliasing_check_with: not all variables of VPointer are available at speculative check!");
+      other.print_on(tty);
+    }
+#endif
+    return false;
+  }
+
   return true;
 }

@@ -1147,6 +1183,8 @@ BoolNode* VPointer::make_speculative_aliasing_check_with(const VPointer& other,
   Node* main_init = new ConvL2INode(main_initL);
   phase->register_new_node_with_ctrl_of(main_init, pre_init);
 
+  assert(vp1.can_make_pointer_expression_at_speculative_check(), "variables must be available early enough to avoid cycles");
+  assert(vp2.can_make_pointer_expression_at_speculative_check(), "variables must be available early enough to avoid cycles");
   Node* p1_init = vp1.make_pointer_expression(main_init, ctrl);
   Node* p2_init = vp2.make_pointer_expression(main_init, ctrl);
   Node* size1 = igvn.longcon(vp1.size());
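Note: whatever form the emitted graph takes, a speculative aliasing check of this kind ultimately encodes a disjointness test on two address ranges. An illustrative Java sketch of that predicate (names and values are invented, not taken from this code):

    public class AliasCheckDemo {
        // Half-open byte ranges [p1, p1 + size1) and [p2, p2 + size2) are disjoint
        // exactly when one ends at or before the other begins.
        static boolean disjoint(long p1, long size1, long p2, long size2) {
            return p1 + size1 <= p2 || p2 + size2 <= p1;
        }

        public static void main(String[] args) {
            System.out.println(disjoint(0, 16, 16, 16)); // true: adjacent ranges do not alias
            System.out.println(disjoint(0, 24, 16, 16)); // false: [16, 24) is shared
        }
    }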
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2023, Arm Limited. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -504,6 +504,8 @@ private:
 //
 class VLoopMemorySlices : public StackObj {
 private:
+  static constexpr char const* FAILURE_DIFFERENT_MEMORY_INPUT = "Load only slice has multiple memory inputs";
+
   const VLoop& _vloop;
   const VLoopBody& _body;
 

@@ -521,7 +523,7 @@ public:
   const GrowableArray<Node*>& inputs() const { return _inputs; }
   const GrowableArray<PhiNode*>& heads() const { return _heads; }
 
-  void find_memory_slices();
+  VStatus find_memory_slices();
   void get_slice_in_reverse_order(PhiNode* head, MemNode* tail, GrowableArray<MemNode*>& slice) const;
   bool same_memory_slice(MemNode* m1, MemNode* m2) const;
 

@@ -1188,6 +1190,22 @@ private:
     return true;
   }
 
+  // We already know that all non-iv summands are pre-loop invariant.
+  // See init_are_non_iv_summands_pre_loop_invariant.
+  // That is good enough for alignment computations in the pre-loop limit. But it is not
+  // sufficient if we want to use the variables of the VPointer at the speculative check,
+  // which is further up before the pre-loop.
+  bool can_make_pointer_expression_at_speculative_check() const {
+    bool success = true;
+    mem_pointer().for_each_non_empty_summand([&] (const MemPointerSummand& s) {
+      Node* variable = s.variable();
+      if (variable != _vloop.iv() && !_vloop.is_available_for_speculative_check(variable)) {
+        success = false;
+      }
+    });
+    return success;
+  }
+
   // In the pointer analysis, and especially the AlignVector analysis, we assume that
   // stride and scale are not too large. For example, we multiply "iv_scale * iv_stride",
   // and assume that this does not overflow the int range. We also take "abs(iv_scale)"
@@ -936,28 +936,26 @@ bool VectorNode::is_scalar_op_that_returns_int_but_vector_op_returns_long(int op
   }
 }
 
+// Idealize vector operations whose vector size is less than the hardware supported
+// max vector size. Generate a vector mask for the operation. Lanes with indices
+// inside of the vector size are set to true, while the remaining lanes are set to
+// false. Returns the corresponding masked vector node.
+static Node* ideal_partial_operations(PhaseGVN* phase, Node* node, const TypeVect* vt) {
+  if (!Matcher::vector_needs_partial_operations(node, vt)) {
+    return nullptr;
+  }
 
-Node* VectorNode::try_to_gen_masked_vector(PhaseGVN* gvn, Node* node, const TypeVect* vt) {
   int vopc = node->Opcode();
   uint vlen = vt->length();
   BasicType bt = vt->element_basic_type();
+  assert(Matcher::match_rule_supported_vector_masked(vopc, vlen, bt),
+         "The masked feature is required for the vector operation");
+  assert(Matcher::match_rule_supported_vector(Op_VectorMaskGen, vlen, bt),
+         "'VectorMaskGen' is required to generate a vector mask");
 
-  // Predicated vectors do not need to add another mask input
-  if (node->is_predicated_vector() || !Matcher::has_predicated_vectors() ||
-      !Matcher::match_rule_supported_vector_masked(vopc, vlen, bt) ||
-      !Matcher::match_rule_supported_vector(Op_VectorMaskGen, vlen, bt)) {
-    return nullptr;
-  }
-
-  Node* mask = nullptr;
-  // Generate a vector mask for vector operation whose vector length is lower than the
-  // hardware supported max vector length.
-  if (vt->length_in_bytes() < (uint)MaxVectorSize) {
-    Node* length = gvn->transform(new ConvI2LNode(gvn->makecon(TypeInt::make(vlen))));
-    mask = gvn->transform(VectorMaskGenNode::make(length, bt, vlen));
-  } else {
-    return nullptr;
-  }
+  // Generate a vector mask, with lanes inside of the vector length set to true.
+  Node* length = phase->transform(new ConvI2LNode(phase->makecon(TypeInt::make(vlen))));
+  Node* mask = phase->transform(VectorMaskGenNode::make(length, bt, vlen));
 
   // Generate the related masked op for vector load/store/load_gather/store_scatter.
   // Or append the mask to the vector op's input list by default.

@@ -1037,8 +1035,9 @@ bool VectorNode::should_swap_inputs_to_help_global_value_numbering() {
 }
 
 Node* VectorNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  if (Matcher::vector_needs_partial_operations(this, vect_type())) {
-    return try_to_gen_masked_vector(phase, this, vect_type());
+  Node* n = ideal_partial_operations(phase, this, vect_type());
+  if (n != nullptr) {
+    return n;
   }
 
   // Sort inputs of commutative non-predicated vector operations to help value numbering.

@@ -1119,9 +1118,9 @@ LoadVectorNode* LoadVectorNode::make(int opc, Node* ctl, Node* mem,
 }
 
 Node* LoadVectorNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  const TypeVect* vt = vect_type();
-  if (Matcher::vector_needs_partial_operations(this, vt)) {
-    return VectorNode::try_to_gen_masked_vector(phase, this, vt);
+  Node* n = ideal_partial_operations(phase, this, vect_type());
+  if (n != nullptr) {
+    return n;
   }
   return LoadNode::Ideal(phase, can_reshape);
 }

@@ -1133,9 +1132,9 @@ StoreVectorNode* StoreVectorNode::make(int opc, Node* ctl, Node* mem, Node* adr,
 }
 
 Node* StoreVectorNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  const TypeVect* vt = vect_type();
-  if (Matcher::vector_needs_partial_operations(this, vt)) {
-    return VectorNode::try_to_gen_masked_vector(phase, this, vt);
+  Node* n = ideal_partial_operations(phase, this, vect_type());
+  if (n != nullptr) {
+    return n;
   }
   return StoreNode::Ideal(phase, can_reshape);
 }

@@ -1411,11 +1410,11 @@ ReductionNode* ReductionNode::make(int opc, Node* ctrl, Node* n1, Node* n2, Basi
 }
 
 Node* ReductionNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  const TypeVect* vt = vect_type();
-  if (Matcher::vector_needs_partial_operations(this, vt)) {
-    return VectorNode::try_to_gen_masked_vector(phase, this, vt);
+  Node* n = ideal_partial_operations(phase, this, vect_type());
+  if (n != nullptr) {
+    return n;
   }
-  return nullptr;
+  return Node::Ideal(phase, can_reshape);
 }
 
 // Convert fromLong to maskAll if the input sets or unsets all lanes.

@@ -1893,11 +1892,11 @@ Node* VectorMaskOpNode::make(Node* mask, const Type* ty, int mopc) {
 }
 
 Node* VectorMaskOpNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  const TypeVect* vt = vect_type();
-  if (Matcher::vector_needs_partial_operations(this, vt)) {
-    return VectorNode::try_to_gen_masked_vector(phase, this, vt);
+  Node* n = ideal_partial_operations(phase, this, vect_type());
+  if (n != nullptr) {
+    return n;
  }
-  return nullptr;
+  return TypeNode::Ideal(phase, can_reshape);
 }
 
 Node* VectorMaskCastNode::Identity(PhaseGVN* phase) {

@@ -117,7 +117,6 @@ class VectorNode : public TypeNode {
   static bool is_vector_bitwise_not_pattern(Node* n);
   static Node* degenerate_vector_rotate(Node* n1, Node* n2, bool is_rotate_left, int vlen,
                                         BasicType bt, PhaseGVN* phase);
-  static Node* try_to_gen_masked_vector(PhaseGVN* gvn, Node* node, const TypeVect* vt);
 
   // [Start, end) half-open range defining which operands are vectors
   static void vector_operands(Node* n, uint* start, uint* end);
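Note: the lane-masking idea above has a direct user-visible analogue in the Java Vector API: lanes with indices below a logical length are enabled and the rest are switched off. A minimal sketch using the incubating jdk.incubator.vector module (compile and run with --add-modules jdk.incubator.vector; the demo values are invented):

    import jdk.incubator.vector.IntVector;
    import jdk.incubator.vector.VectorMask;
    import jdk.incubator.vector.VectorSpecies;

    public class PartialLanesDemo {
        private static final VectorSpecies<Integer> S = IntVector.SPECIES_PREFERRED;

        public static void main(String[] args) {
            int logicalLength = 3; // pretend the operation covers only 3 lanes
            VectorMask<Integer> m = S.indexInRange(0, logicalLength);
            int[] src = new int[S.length()];
            for (int i = 0; i < src.length; i++) {
                src[i] = i + 1;
            }
            IntVector v = IntVector.fromArray(S, src, 0, m); // masked load: disabled lanes read as 0
            System.out.println(m + " -> " + v);
        }
    }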
@@ -229,6 +229,19 @@ extern void trace_class_resolution(Klass* to_class) {
 
 // java.lang.System //////////////////////////////////////////////////////////////////////
 
+JVM_ENTRY(jboolean, JVM_AOTEndRecording(JNIEnv *env))
+#if INCLUDE_CDS
+  if (CDSConfig::is_dumping_preimage_static_archive()) {
+    if (!AOTMetaspace::preimage_static_archive_dumped()) {
+      AOTMetaspace::dump_static_archive(THREAD);
+      return JNI_TRUE;
+    }
+  }
+  return JNI_FALSE;
+#else
+  return JNI_FALSE;
+#endif // INCLUDE_CDS
+JVM_END
+
 JVM_LEAF(jlong, JVM_CurrentTimeMillis(JNIEnv *env, jclass ignored))
   return os::javaTimeMillis();
@@ -199,6 +199,10 @@ WB_ENTRY(jint, WB_TakeLockAndHangInSafepoint(JNIEnv* env, jobject wb))
   return 0;
 WB_END
 
+WB_ENTRY(jlong, WB_GetMinimumJavaStackSize(JNIEnv* env, jobject o))
+  return os::get_minimum_java_stack_size();
+WB_END
+
 class WBIsKlassAliveClosure : public LockedClassesDo {
   Symbol* _name;
   int _count;

@@ -3133,7 +3137,8 @@ static JNINativeMethod methods[] = {
   {CC"cleanMetaspaces", CC"()V", (void*)&WB_CleanMetaspaces},
   {CC"rss", CC"()J", (void*)&WB_Rss},
   {CC"printString", CC"(Ljava/lang/String;I)Ljava/lang/String;", (void*)&WB_PrintString},
-  {CC"lockAndStuckInSafepoint", CC"()V", (void*)&WB_TakeLockAndHangInSafepoint},
+  {CC"lockAndStuckInSafepoint", CC"()V", (void*)&WB_TakeLockAndHangInSafepoint},
+  {CC"getMinimumJavaStackSize", CC"()J", (void*)&WB_GetMinimumJavaStackSize},
   {CC"wordSize", CC"()J", (void*)&WB_WordSize},
   {CC"rootChunkWordSize", CC"()J", (void*)&WB_RootChunkWordSize},
   {CC"isStatic", CC"()Z", (void*)&WB_IsStaticallyLinked},
@@ -306,7 +306,7 @@ JVMFlag::Error TypeProfileLevelConstraintFunc(uint value, bool verbose) {
 }
 
 JVMFlag::Error VerifyIterativeGVNConstraintFunc(uint value, bool verbose) {
-  const int max_modes = 4;
+  const int max_modes = 5;
   uint original_value = value;
   for (int i = 0; i < max_modes; i++) {
     if (value % 10 > 1) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -465,10 +465,7 @@ void before_exit(JavaThread* thread, bool halt) {
     event.commit();
   }
 
-  // 2nd argument (emit_event_shutdown) should be set to false
-  // because EventShutdown would be emitted at Threads::destroy_vm().
-  // (one of the callers of before_exit())
-  JFR_ONLY(Jfr::on_vm_shutdown(true, false, halt);)
+  JFR_ONLY(Jfr::on_vm_shutdown(false, halt);)
 
   // Stop the WatcherThread. We do this before disenrolling various
   // PeriodicTasks to reduce the likelihood of races.
@@ -28,6 +28,7 @@
 #include "prims/jvmtiThreadState.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaThread.hpp"
+#include "runtime/jniHandles.hpp"
 #include "runtime/mountUnmountDisabler.hpp"
 #include "runtime/threadSMR.hpp"
 

@@ -193,6 +194,13 @@ void MountUnmountDisabler::end_transition(JavaThread* current, oop vthread, bool
   }
 }
 
+// disable transitions for one virtual thread
+// disable transitions for all threads if thread is nullptr or a platform thread
+MountUnmountDisabler::MountUnmountDisabler(jthread thread)
+  : MountUnmountDisabler(JNIHandles::resolve_external_guard(thread))
+{
+}
+
 // disable transitions for one virtual thread
 // disable transitions for all threads if thread is nullptr or a platform thread
 MountUnmountDisabler::MountUnmountDisabler(oop thread_oop)

@@ -367,7 +375,7 @@ MountUnmountDisabler::enable_transition_for_all() {
   OrderAccess::release();
 
   MonitorLocker ml(VThreadTransition_lock);
-  if (exclusive_operation_ongoing()) {
+  if (_is_exclusive) {
     set_exclusive_operation_ongoing(false);
   }
   dec_active_disablers();

@@ -58,6 +58,7 @@ class MountUnmountDisabler : public AnyObj {
 public:
   MountUnmountDisabler(bool exlusive = false);
   MountUnmountDisabler(oop thread_oop);
+  MountUnmountDisabler(jthread thread);
   ~MountUnmountDisabler();
 
   static int global_vthread_transition_disable_count();
@@ -2577,6 +2577,10 @@ jint os::set_minimum_stack_sizes() {
   return JNI_OK;
 }
 
+jlong os::get_minimum_java_stack_size() {
+  return static_cast<jlong>(_java_thread_min_stack_allowed);
+}
+
 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
 // which is used to find statically linked in agents.
 // Parameters:

@@ -390,6 +390,8 @@ class os: AllStatic {
   static jint set_minimum_stack_sizes();
 
 public:
+  // get allowed minimum java stack size
+  static jlong get_minimum_java_stack_size();
   // Find committed memory region within specified range (start, start + size),
   // return true if found any
   static bool committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size);
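Note: os::get_minimum_java_stack_size exposes the floor the VM enforces for Java thread stacks. At the Java level, a per-thread stack size can be requested (though not guaranteed) through the platform thread builder; a minimal sketch with an invented size:

    public class StackSizeDemo {
        public static void main(String[] args) throws InterruptedException {
            Thread t = Thread.ofPlatform()
                    .stackSize(1024 * 1024) // a request, not a guarantee
                    .unstarted(() -> System.out.println("running"));
            t.start();
            t.join();
        }
    }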
@@ -3362,7 +3362,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
                     RegisterMap::WalkContinuation::skip);
   frame sender = fr.sender(&map);
   if (sender.is_interpreted_frame()) {
-    current->push_cont_fastpath(sender.sp());
+    current->push_cont_fastpath(sender.unextended_sp());
   }
 
   return buf;

@@ -353,6 +353,7 @@
   nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \
   nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \
   nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \
+  static_field(ThreadLocalAllocBuffer, _reserve_for_allocation_prefetch, int) \
   static_field(ThreadLocalAllocBuffer, _target_refills, unsigned) \
   nonstatic_field(ThreadLocalAllocBuffer, _number_of_refills, unsigned) \
   nonstatic_field(ThreadLocalAllocBuffer, _refill_waste, unsigned) \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -63,6 +63,9 @@
 #include "utilities/nativeStackPrinter.hpp"
 #include "utilities/unsigned5.hpp"
 #include "utilities/vmError.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
 
 #include <stdarg.h>
 #include <stdio.h>

@@ -262,6 +265,8 @@ void report_untested(const char* file, int line, const char* message) {
 void report_java_out_of_memory(const char* message) {
   static int out_of_memory_reported = 0;
 
+  JFR_ONLY(Jfr::on_report_java_out_of_memory();)
+
   // A number of threads may attempt to report OutOfMemoryError at around the
   // same time. To avoid dumping the heap or executing the data collection
   // commands multiple times we just do it once when the first thread reports

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, 2024 SAP SE. All rights reserved.
  * Copyright (c) 2023, 2025, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

@@ -1898,7 +1898,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt
     log.set_fd(-1);
   }
 
-  JFR_ONLY(Jfr::on_vm_shutdown(static_cast<VMErrorType>(_id) == OOM_JAVA_HEAP_FATAL, true);)
+  JFR_ONLY(Jfr::on_vm_shutdown(true, false, static_cast<VMErrorType>(_id) == OOM_JAVA_HEAP_FATAL);)
 
   if (PrintNMTStatistics) {
     fdStream fds(fd_out);
@@ -1111,7 +1111,7 @@ public final class String
         int sp = 0;
         int sl = len;
         while (sp < sl) {
-            int ret = StringCoding.encodeISOArray(val, sp, dst, dp, len);
+            int ret = StringCoding.implEncodeISOArray(val, sp, dst, dp, len);
             sp = sp + ret;
             dp = dp + ret;
             if (ret != len) {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,11 +26,8 @@
 
 package java.lang;
 
-import jdk.internal.util.Preconditions;
 import jdk.internal.vm.annotation.IntrinsicCandidate;
 
-import java.util.function.BiFunction;
-
 /**
  * Utility class for string encoding and decoding.
  */

@@ -41,7 +38,7 @@ class StringCoding {
     /**
      * Count the number of leading non-zero ascii chars in the range.
      */
-    static int countNonZeroAscii(String s) {
+    public static int countNonZeroAscii(String s) {
         byte[] value = s.value();
         if (s.isLatin1()) {
             return countNonZeroAsciiLatin1(value, 0, value.length);

@@ -53,7 +50,7 @@ class StringCoding {
     /**
      * Count the number of non-zero ascii chars in the range.
      */
-    private static int countNonZeroAsciiLatin1(byte[] ba, int off, int len) {
+    public static int countNonZeroAsciiLatin1(byte[] ba, int off, int len) {
         int limit = off + len;
         for (int i = off; i < limit; i++) {
             if (ba[i] <= 0) {

@@ -66,7 +63,7 @@ class StringCoding {
     /**
      * Count the number of leading non-zero ascii chars in the range.
      */
-    private static int countNonZeroAsciiUTF16(byte[] ba, int off, int strlen) {
+    public static int countNonZeroAsciiUTF16(byte[] ba, int off, int strlen) {
         int limit = off + strlen;
         for (int i = off; i < limit; i++) {
             char c = StringUTF16.charAt(ba, i);

@@ -77,7 +74,7 @@ class StringCoding {
         return strlen;
     }
 
-    static boolean hasNegatives(byte[] ba, int off, int len) {
+    public static boolean hasNegatives(byte[] ba, int off, int len) {
         return countPositives(ba, off, len) != len;
     }
 
@@ -88,24 +85,9 @@ class StringCoding {
      * bytes in the range. If there are negative bytes, the implementation must return
      * a value that is less than or equal to the index of the first negative byte
      * in the range.
-     *
-     * @param ba a byte array
-     * @param off the index of the first byte to start reading from
-     * @param len the total number of bytes to read
-     * @throws NullPointerException if {@code ba} is null
-     * @throws ArrayIndexOutOfBoundsException if the provided sub-range is
-     *         {@linkplain Preconditions#checkFromIndexSize(int, int, int, BiFunction) out of bounds}
      */
-    static int countPositives(byte[] ba, int off, int len) {
-        Preconditions.checkFromIndexSize(
-                off, len,
-                ba.length, // Implicit null check on `ba`
-                Preconditions.AIOOBE_FORMATTER);
-        return countPositives0(ba, off, len);
-    }
-
     @IntrinsicCandidate
-    private static int countPositives0(byte[] ba, int off, int len) {
+    public static int countPositives(byte[] ba, int off, int len) {
         int limit = off + len;
         for (int i = off; i < limit; i++) {
             if (ba[i] < 0) {
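Note: the removed wrapper validated the (off, len) sub-range against ba.length before reaching the intrinsic. The same contract is available through the public java.util.Objects.checkFromIndexSize; a minimal sketch with invented sizes:

    import java.util.Objects;

    public class RangeCheckDemo {
        public static void main(String[] args) {
            byte[] ba = new byte[16];
            // Accepts [4, 4 + 8) because it lies within [0, 16).
            Objects.checkFromIndexSize(4, 8, ba.length);
            try {
                Objects.checkFromIndexSize(12, 8, ba.length); // [12, 20) escapes the array
            } catch (IndexOutOfBoundsException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }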
@@ -115,37 +97,9 @@ class StringCoding {
         return len;
     }
 
-    /**
-     * Encodes as many ISO-8859-1 codepoints as possible from the source byte
-     * array containing characters encoded in UTF-16, into the destination byte
-     * array, assuming that the encoding is ISO-8859-1 compatible.
-     *
-     * @param sa the source byte array containing characters encoded in UTF-16
-     * @param sp the index of the <em>character (not byte!)</em> from the source array to start reading from
-     * @param da the target byte array
-     * @param dp the index of the target array to start writing to
-     * @param len the maximum number of <em>characters (not bytes!)</em> to be encoded
-     * @return the total number of <em>characters (not bytes!)</em> successfully encoded
-     * @throws NullPointerException if any of the provided arrays is null
-     */
-    static int encodeISOArray(byte[] sa, int sp,
-                              byte[] da, int dp, int len) {
-        // This method should tolerate invalid arguments, matching the lenient behavior of the VM intrinsic.
-        // Hence, using operator expressions instead of `Preconditions`, which throw on failure.
-        int sl;
-        if ((sp | dp | len) < 0 ||
-            // Halving the length of `sa` to obtain the number of characters:
-            sp >= (sl = sa.length >>> 1) || // Implicit null check on `sa`
-            dp >= da.length) { // Implicit null check on `da`
-            return 0;
-        }
-        int minLen = Math.min(len, Math.min(sl - sp, da.length - dp));
-        return encodeISOArray0(sa, sp, da, dp, minLen);
-    }
-
     @IntrinsicCandidate
-    private static int encodeISOArray0(byte[] sa, int sp,
-                                       byte[] da, int dp, int len) {
+    public static int implEncodeISOArray(byte[] sa, int sp,
+                                         byte[] da, int dp, int len) {
         int i = 0;
         for (; i < len; i++) {
             char c = StringUTF16.getChar(sa, sp++);
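Note: the clamping the removed wrapper performed kept the intrinsic from reading past the UTF-16 source (two bytes per char) or writing past the destination. A standalone sketch of that arithmetic (class name and values are invented):

    public class ClampDemo {
        static int clampedLength(int saBytes, int sp, int daLength, int dp, int len) {
            int sl = saBytes >>> 1; // UTF-16 source: two bytes per character
            if ((sp | dp | len) < 0 || sp >= sl || dp >= daLength) {
                return 0; // nothing can be encoded safely
            }
            return Math.min(len, Math.min(sl - sp, daLength - dp));
        }

        public static void main(String[] args) {
            // A 32-byte source holds 16 chars; reading from 10 leaves 6, writing from 2 leaves 6.
            System.out.println(clampedLength(32, 10, 8, 2, 100)); // prints 6
        }
    }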
@@ -156,35 +110,10 @@ class StringCoding {
         return i;
     }
 
-    /**
-     * Encodes as many ASCII codepoints as possible from the source
-     * character array into the destination byte array, assuming that
-     * the encoding is ASCII compatible.
-     *
-     * @param sa the source character array
-     * @param sp the index of the source array to start reading from
-     * @param da the target byte array
-     * @param dp the index of the target array to start writing to
-     * @param len the maximum number of characters to be encoded
-     * @return the total number of characters successfully encoded
-     * @throws NullPointerException if any of the provided arrays is null
-     */
-    static int encodeAsciiArray(char[] sa, int sp,
-                                byte[] da, int dp, int len) {
-        // This method should tolerate invalid arguments, matching the lenient behavior of the VM intrinsic.
-        // Hence, using operator expressions instead of `Preconditions`, which throw on failure.
-        if ((sp | dp | len) < 0 ||
-            sp >= sa.length || // Implicit null check on `sa`
-            dp >= da.length) { // Implicit null check on `da`
-            return 0;
-        }
-        int minLen = Math.min(len, Math.min(sa.length - sp, da.length - dp));
-        return encodeAsciiArray0(sa, sp, da, dp, minLen);
-    }
-
     @IntrinsicCandidate
-    static int encodeAsciiArray0(char[] sa, int sp,
-                                 byte[] da, int dp, int len) {
+    public static int implEncodeAsciiArray(char[] sa, int sp,
+                                           byte[] da, int dp, int len)
+    {
         int i = 0;
         for (; i < len; i++) {
             char c = sa[sp++];
@@ -55,6 +55,7 @@ import java.util.Properties;
 import java.util.ResourceBundle;
 import java.util.Set;
 import java.util.concurrent.Executor;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.function.Supplier;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Stream;

@@ -2176,8 +2177,8 @@ public final class System {
             return String.decodeASCII(src, srcOff, dst, dstOff, len);
         }
 
-        public int encodeASCII(char[] sa, int sp, byte[] da, int dp, int len) {
-            return StringCoding.encodeAsciiArray(sa, sp, da, dp, len);
+        public int uncheckedEncodeASCII(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
+            return StringCoding.implEncodeAsciiArray(src, srcOff, dst, dstOff, len);
         }
 
         public InputStream initialSystemIn() {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -89,15 +89,19 @@ final class VirtualThread extends BaseVirtualThread {
      *
      *  RUNNING -> PARKING        // Thread parking with LockSupport.park
      *  PARKING -> PARKED         // cont.yield successful, parked indefinitely
-     *  PARKING -> PINNED         // cont.yield failed, parked indefinitely on carrier
     *   PARKED -> UNPARKED       // unparked, may be scheduled to continue
-     *   PINNED -> RUNNING        // unparked, continue execution on same carrier
     * UNPARKED -> RUNNING        // continue execution after park
+     *
+     *  PARKING -> RUNNING        // cont.yield failed, need to park on carrier
+     *  RUNNING -> PINNED         // park on carrier
+     *   PINNED -> RUNNING        // unparked, continue execution on same carrier
     *
     *  RUNNING -> TIMED_PARKING        // Thread parking with LockSupport.parkNanos
     *  TIMED_PARKING -> TIMED_PARKED   // cont.yield successful, timed-parked
-     *  TIMED_PARKING -> TIMED_PINNED  // cont.yield failed, timed-parked on carrier
     *  TIMED_PARKED -> UNPARKED        // unparked, may be scheduled to continue
+     *
+     *  TIMED_PARKING -> RUNNING       // cont.yield failed, need to park on carrier
+     *  RUNNING -> TIMED_PINNED        // park on carrier
+     *  TIMED_PINNED -> RUNNING        // unparked, continue execution on same carrier
     *
     *  RUNNING -> BLOCKING       // blocking on monitor enter

@@ -108,7 +112,7 @@ final class VirtualThread extends BaseVirtualThread {
     *  RUNNING -> WAITING        // transitional state during wait on monitor
     *  WAITING -> WAIT           // waiting on monitor
     *     WAIT -> BLOCKED        // notified, waiting to be unblocked by monitor owner
-     *     WAIT -> UNBLOCKED      // timed-out/interrupted
+     *     WAIT -> UNBLOCKED      // interrupted
     *
     *  RUNNING -> TIMED_WAITING       // transition state during timed-waiting on monitor
     *  TIMED_WAITING -> TIMED_WAIT    // timed-waiting on monitor
@@ -856,16 +860,20 @@ final class VirtualThread extends BaseVirtualThread {
     * Re-enables this virtual thread for scheduling. If this virtual thread is parked
     * then its task is scheduled to continue, otherwise its next call to {@code park} or
     * {@linkplain #parkNanos(long) parkNanos} is guaranteed not to block.
+    * @param lazySubmit to use lazySubmit if possible
     * @throws RejectedExecutionException if the scheduler cannot accept a task
     */
-    @Override
-    void unpark() {
+    private void unpark(boolean lazySubmit) {
        if (!getAndSetParkPermit(true) && currentThread() != this) {
            int s = state();
 
            // unparked while parked
            if ((s == PARKED || s == TIMED_PARKED) && compareAndSetState(s, UNPARKED)) {
-                submitRunContinuation();
+                if (lazySubmit) {
+                    lazySubmitRunContinuation();
+                } else {
+                    submitRunContinuation();
+                }
                return;
            }
 

@@ -888,6 +896,11 @@ final class VirtualThread extends BaseVirtualThread {
        }
    }
 
+    @Override
+    void unpark() {
+        unpark(false);
+    }
+
    /**
     * Invoked by unblocker thread to unblock this virtual thread.
     */

@@ -904,11 +917,7 @@ final class VirtualThread extends BaseVirtualThread {
     */
    private void parkTimeoutExpired() {
        assert !VirtualThread.currentThread().isVirtual();
-        if (!getAndSetParkPermit(true)
-            && (state() == TIMED_PARKED)
-            && compareAndSetState(TIMED_PARKED, UNPARKED)) {
-            lazySubmitRunContinuation();
-        }
+        unpark(true);
    }
 
    /**
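Note: both paths above converge on the park-permit protocol: unpark grants a permit and a parked virtual thread is resubmitted to continue. A minimal sketch of that contract via the public LockSupport API (the sleep only gives the demo thread time to reach park):

    import java.util.concurrent.locks.LockSupport;

    public class ParkPermitDemo {
        public static void main(String[] args) throws InterruptedException {
            Thread vt = Thread.ofVirtual().unstarted(() -> {
                LockSupport.park(); // blocks until a permit is available
                System.out.println("resumed after unpark");
            });
            vt.start();
            Thread.sleep(50);       // let the virtual thread park
            LockSupport.unpark(vt); // grant the permit; the thread is rescheduled
            vt.join();
        }
    }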
@@ -64,9 +64,12 @@ package java.lang.runtime;
 * floating-point type is considered exact.</li>
 * </ul>
 *
- * @jls 5.7.1 Exact Testing Conversions
- * @jls 5.7.2 Unconditionally Exact Testing Conversions
- * @jls 15.20.2 The instanceof Operator
+ * @see <a href="../../../../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1">
+ *      JLS 5.7.1 Exact Testing Conversions</a>
+ * @see <a href="../../../../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.2">
+ *      JLS 5.7.2 Unconditionally Exact Testing Conversions</a>
+ * @see <a href="../../../../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-15.20.2">
+ *      JLS 15.20.2 The instanceof Operator</a>
 *
 * @implNote Some exactness checks describe a test which can be redirected
 * safely through one of the existing methods. Those are omitted too (i.e.,
Some files were not shown because too many files have changed in this diff.