Mirror of https://github.com/openjdk/jdk.git
Commit 631085bdfd: Merge
@@ -645,6 +645,8 @@ bcbe7b8a77b8971bc221c0be1bd2abb6fb68c2d0 jdk-16+2
b58fc60580550a4a587cab729d8fd87223ad6932 jdk-15+29
76810b3a88c8c641ae3850a8dfd7c40c984aea9d jdk-16+3
6909e4a1f25bfe9a2727026f5845fc1fc44a36aa jdk-15+30
78c07dd7240412e60d8694e9dbfd46e57bd42ee0 jdk-16+4
e2622818f0bd30e736252eba101fe7d2c27f400b jdk-16+4
a32f58c6b8be81877411767de7ba9c4cf087c1b5 jdk-15+31
143e258f64af490010eb7e0bacc1cfaeceff0993 jdk-16+5
2dad000726b8d5db9f3df647fb4949d88f269dd4 jdk-15+32
4a8fd81d64bafa523cddb45f82805536edace106 jdk-16+6
@@ -86,17 +86,18 @@ AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
AC_SUBST(TOPDIR)
AC_SUBST(CONFIGURE_START_DIR)

# We can only call UTIL_FIXUP_PATH after BASIC_CHECK_PATHS_WINDOWS.
UTIL_FIXUP_PATH(TOPDIR)
UTIL_FIXUP_PATH(CONFIGURE_START_DIR)

if test "x$CUSTOM_ROOT" != x; then
  UTIL_FIXUP_PATH(CUSTOM_ROOT)
  WORKSPACE_ROOT="${CUSTOM_ROOT}"
else
  WORKSPACE_ROOT="${TOPDIR}"
fi
AC_SUBST(WORKSPACE_ROOT)

# We can only call UTIL_FIXUP_PATH after BASIC_CHECK_PATHS_WINDOWS.
UTIL_FIXUP_PATH(CONFIGURE_START_DIR)
UTIL_FIXUP_PATH(TOPDIR)

# Locate the directory of this script.
AUTOCONF_DIR=$TOPDIR/make/autoconf
@@ -484,7 +484,7 @@ endif
# Defines the sub directory structure to store variable value file in
DependOnVariableDirName = \
    $(strip $(addsuffix $(if $(MODULE),/$(MODULE)), \
-      $(subst $(TOPDIR)/,, $(if $(filter /%, $(firstword $(MAKEFILE_LIST))), \
+      $(subst $(WORKSPACE_ROOT)/,, $(if $(filter /%, $(firstword $(MAKEFILE_LIST))), \
        $(firstword $(MAKEFILE_LIST)), \
        $(CURDIR)/$(firstword $(MAKEFILE_LIST))))))
@@ -496,6 +496,13 @@ DependOnVariableFileName = \
    $(strip $(if $(strip $2), $2, \
      $(MAKESUPPORT_OUTPUTDIR)/vardeps/$(DependOnVariableDirName)/$(strip $1).vardeps))

+# Writes the vardeps file. Assumes $1_filename has been setup
+# Param 1 - Name of variable
+DependOnVariableWriteFile = \
+    $(call MakeDir, $(dir $($1_filename))) \
+    $(call WriteFile, $1_old:=$(call DoubleDollar,$(call EscapeHash,$($1))), \
+        $($1_filename)) \
+
# Does the actual work with parameters stripped.
# If the file exists AND the contents is the same as the variable, do nothing
# else print a new file.
@@ -505,14 +512,18 @@ DependOnVariableFileName = \
DependOnVariableHelper = \
    $(strip \
      $(eval $1_filename := $(call DependOnVariableFileName, $1, $2)) \
-     $(if $(wildcard $($1_filename)), $(eval include $($1_filename))) \
-     $(if $(call equals, $(strip $($1)), $(strip $($1_old))),,\
-       $(call MakeDir, $(dir $($1_filename))) \
-       $(if $(findstring $(LOG_LEVEL), trace), \
-         $(info NewVariable $1: >$(strip $($1))<) \
-         $(info OldVariable $1: >$(strip $($1_old))<)) \
-       $(call WriteFile, $1_old:=$(call DoubleDollar,$(call EscapeHash,$($1))), \
-         $($1_filename))) \
+     $(if $(wildcard $($1_filename)), \
+       $(eval include $($1_filename)) \
+       $(if $(call equals, $(strip $($1)), $(strip $($1_old))),,\
+         $(if $(findstring $(LOG_LEVEL), trace), \
+           $(info NewVariable $1: >$(strip $($1))<) \
+           $(info OldVariable $1: >$(strip $($1_old))<) \
+         ) \
+         $(call DependOnVariableWriteFile,$1) \
+       ) \
+     , \
+       $(call DependOnVariableWriteFile,$1) \
+     ) \
      $($1_filename) \
    )
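Note: the two MakeBase.gmk hunks above factor the vardeps write-out into
DependOnVariableWriteFile and make DependOnVariableHelper create the stamp
file even on the first run. The underlying mechanism is value stamping: the
previous value is kept in a file as $1_old, re-read on the next make
invocation, and the file is rewritten only when the value differs, so its
mtime changes exactly when the variable changes. A minimal standalone C++
sketch of that stamping idea (the names readStamp and vardeps.txt are
invented for illustration, not taken from the build system):

    #include <fstream>
    #include <iostream>
    #include <string>

    // Read the previously recorded value, if any (empty if no stamp yet).
    static std::string readStamp(const std::string& path) {
        std::ifstream in(path);
        std::string old;
        std::getline(in, old);
        return old;
    }

    int main() {
        const std::string stampFile = "vardeps.txt";  // hypothetical stamp file
        const std::string current  = "CFLAGS=-O2 -g"; // hypothetical variable value

        // Rewrite the stamp only when the value changed; the file's mtime
        // then acts as the rebuild trigger for anything depending on it.
        if (readStamp(stampFile) != current) {
            std::ofstream(stampFile, std::ios::trunc) << current << '\n';
            std::cout << "value changed, stamp rewritten\n";
        } else {
            std::cout << "value unchanged, stamp left alone\n";
        }
    }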
@@ -346,6 +346,7 @@ $(MODULE_DEPS_MAKEFILE): $(MODULE_INFOS) \
        sub(/\/\*.*\*\//, ""); \
        gsub(/^ +\*.*/, ""); \
        gsub(/ /, ""); \
+       gsub(/\r/, ""); \
        printf(" %s", $$0) } \
        END { printf("\n") }' $m && \
    $(PRINTF) "TRANSITIVE_MODULES_$(call GetModuleNameFromModuleInfo, $m) :=" && \
@@ -359,6 +360,7 @@ $(MODULE_DEPS_MAKEFILE): $(MODULE_INFOS) \
        sub(/\/\*.*\*\//, ""); \
        gsub(/^ +\*.*/, ""); \
        gsub(/ /, ""); \
+       gsub(/\r/, ""); \
        printf(" %s", $$0) } \
        END { printf("\n") }' $m \
    ) >> $@ $(NEWLINE))
@@ -62,7 +62,11 @@ define SetupTestFilesCompilationBody
    $1_OUTPUT_SUBDIR := lib
    $1_BASE_CFLAGS := $(CFLAGS_JDKLIB)
    $1_BASE_CXXFLAGS := $(CXXFLAGS_JDKLIB)
-   $1_LDFLAGS := $(LDFLAGS_JDKLIB) $$(call SET_SHARED_LIBRARY_ORIGIN)
+   ifeq ($(call isTargetOs, windows), false)
+     $1_LDFLAGS := $(LDFLAGS_JDKLIB) $$(call SET_SHARED_LIBRARY_ORIGIN) -pthread
+   else
+     $1_LDFLAGS := $(LDFLAGS_JDKLIB) $$(call SET_SHARED_LIBRARY_ORIGIN)
+   endif
    $1_COMPILATION_TYPE := LIBRARY
  else ifeq ($$($1_TYPE), PROGRAM)
    $1_PREFIX = exe
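Note: the hunk above makes every non-Windows native test library link with
-pthread instead of relying on per-library flags. As a rough illustration
(this demo is hypothetical, not one of the JDK tests), a translation unit
like the following typically needs -pthread at both compile and link time
with GCC/Clang on Linux:

    #include <pthread.h>
    #include <cstdio>

    static void* worker(void*) {
        std::puts("hello from worker thread");
        return nullptr;
    }

    int main() {
        pthread_t t;
        // Without -pthread some toolchains fail to resolve pthread_create,
        // or build without the thread-safe variants of libc internals.
        if (pthread_create(&t, nullptr, worker, nullptr) != 0) return 1;
        pthread_join(t, nullptr);
        return 0;
    }

    // Build sketch: g++ demo.cpp -pthread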
@@ -960,7 +960,7 @@ var getJibProfilesDependencies = function (input, common) {

    var devkit_platform_revisions = {
        linux_x64: "gcc9.2.0-OL6.4+1.0",
-       macosx_x64: "Xcode10.1-MacOSX10.14+1.0",
+       macosx_x64: "Xcode11.3.1-MacOSX10.15+1.0",
        windows_x64: "VS2019-16.5.3+1.0",
        linux_aarch64: "gcc9.2.0-OL7.6+1.0",
        linux_arm: "gcc8.2.0-Fedora27+1.0",
@@ -530,7 +530,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "returned for each class. "
    "Generic signatures are described in the signature attribute "
    "section in "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "Since JDWP version 1.5."
    (Out
    )
@@ -643,7 +643,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    )
    (Reply
        (int modBits "Modifier bits as defined in Chapter 4 of "
-           "<cite>The Java™ Virtual Machine Specification</cite>")
+           "<cite>The Java Virtual Machine Specification</cite>")
    )
    (ErrorSet
        (Error INVALID_CLASS "refType is not the ID of a reference "
@@ -671,7 +671,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "which provide additional information on the "
    "field declaration. Individual flag values are "
    "defined in Chapter 4 of "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "In addition, The <code>0xf0000000</code> bit identifies "
    "the field as synthetic, if the synthetic attribute "
    "<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@@ -706,7 +706,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "which provide additional information on the "
    "method declaration. Individual flag values are "
    "defined in Chapter 4 of "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "In addition, The <code>0xf0000000</code> bit identifies "
    "the method as synthetic, if the synthetic attribute "
    "<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@@ -793,7 +793,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "Returns the current status of the reference type. The status "
    "indicates the extent to which the reference type has been "
    "initialized, as described in section 2.1.6 of "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "If the class is linked the PREPARED and VERIFIED bits in the returned status bits "
    "will be set. If the class is initialized the INITIALIZED bit in the returned "
    "status bits will be set. If an error occured during initialization then the "
@@ -872,7 +872,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "generic signature if there is one. "
    "Generic signatures are described in the signature attribute "
    "section in "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "Since JDWP version 1.5."
    (Out
        (referenceType refType "The reference type ID.")
@@ -900,7 +900,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "Fields are returned in the order they occur in the class file. "
    "Generic signatures are described in the signature attribute "
    "section in "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "Since JDWP version 1.5."
    (Out
        (referenceType refType "The reference type ID.")
@@ -917,7 +917,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "which provide additional information on the "
    "field declaration. Individual flag values are "
    "defined in Chapter 4 of "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "In addition, The <code>0xf0000000</code> bit identifies "
    "the field as synthetic, if the synthetic attribute "
    "<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@@ -942,7 +942,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "Methods are returned in the order they occur in the class file. "
    "Generic signatures are described in the signature attribute "
    "section in "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "Since JDWP version 1.5."
    (Out
        (referenceType refType "The reference type ID.")
@@ -959,7 +959,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "which provide additional information on the "
    "method declaration. Individual flag values are "
    "defined in Chapter 4 of "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "In addition, The <code>0xf0000000</code> bit identifies "
    "the method as synthetic, if the synthetic attribute "
    "<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@@ -1022,7 +1022,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    (Command ConstantPool=18
        "Return the raw bytes of the constant pool in the format of the "
        "constant_pool item of the Class File Format in "
-       "<cite>The Java™ Virtual Machine Specification</cite>. "
+       "<cite>The Java Virtual Machine Specification</cite>. "
        "<p>Since JDWP version 1.6. Requires canGetConstantPool capability - see "
        "<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>."
        (Out
@@ -1032,7 +1032,7 @@ JDWP "Java(tm) Debug Wire Protocol"
        (int count "Total number of constant pool entries plus one. This "
            "corresponds to the constant_pool_count item of the "
            "Class File Format in "
-           "<cite>The Java™ Virtual Machine Specification</cite>. ")
+           "<cite>The Java Virtual Machine Specification</cite>. ")
        (Repeat bytes
            (byte cpbytes "Raw bytes of constant pool")
        )
@@ -1435,7 +1435,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    )
    (Command Bytecodes=3
        "Retrieve the method's bytecodes as defined in "
-       "<cite>The Java™ Virtual Machine Specification</cite>. "
+       "<cite>The Java Virtual Machine Specification</cite>. "
        "Requires canGetBytecodes capability - see "
        "<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>."
        (Out
@@ -1491,7 +1491,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "table. Also, synthetic variables may be present. "
    "Generic signatures are described in the signature attribute "
    "section in "
-   "<cite>The Java™ Virtual Machine Specification</cite>. "
+   "<cite>The Java Virtual Machine Specification</cite>. "
    "Since JDWP version 1.5."
    (Out
        (referenceType refType "The class.")
@@ -2082,7 +2082,7 @@ JDWP "Java(tm) Debug Wire Protocol"
    "The method which will return early is referred to as the "
    "called method. The called method is the current method (as "
    "defined by the Frames section in "
-   "<cite>The Java™ Virtual Machine Specification</cite>) "
+   "<cite>The Java Virtual Machine Specification</cite>) "
    "for the specified thread at the time this command "
    "is received. "
    "<p>"
@@ -254,18 +254,18 @@ method name merge descriptor (Ljava/lang/Object;Ljava/lang/Object;Ljava/util/fun

class name java/util/concurrent/locks/StampedLock
header extends java/lang/Object implements java/io/Serializable flags 21 classAnnotations @Ljdk/Profile+Annotation;(value=I1)
-method name tryWriteLock descriptor ()J
-method name writeLockInterruptibly descriptor ()J
-method name tryReadLock descriptor ()J
-method name tryReadLock descriptor (JLjava/util/concurrent/TimeUnit;)J
-method name readLockInterruptibly descriptor ()J
-method name unlock descriptor (J)V
+method name tryWriteLock descriptor ()J flags 1
+method name writeLockInterruptibly descriptor ()J thrownTypes java/lang/InterruptedException flags 1
+method name tryReadLock descriptor ()J flags 1
+method name tryReadLock descriptor (JLjava/util/concurrent/TimeUnit;)J thrownTypes java/lang/InterruptedException flags 1
+method name readLockInterruptibly descriptor ()J thrownTypes java/lang/InterruptedException flags 1
+method name unlock descriptor (J)V flags 1

class name javax/net/ssl/SSLSession
-method name getPeerCertificateChain descriptor ()[Ljavax/security/cert/X509Certificate;
@@ -27,6 +27,6 @@
# ##########################################################
#
class name javax/xml/stream/XMLInputFactory
-method name newFactory descriptor ()Ljavax/xml/stream/XMLInputFactory;
+method name newFactory descriptor ()Ljavax/xml/stream/XMLInputFactory; thrownTypes javax/xml/stream/FactoryConfigurationError flags 9
@@ -27,7 +27,7 @@
# ##########################################################
#
#command used to generate this file:
-#build.tools.symbolgenerator.CreateSymbols build-description-incremental-file symbols include.list 8 jdk8-updated.classes <none> --normalize-method-flags
+#build.tools.symbolgenerator.CreateSymbols build-description-incremental symbols include.list
#
generate platforms 7:8:9:A:B:C:D:E:F
platform version 8 files java.activation-8.sym.txt:java.base-8.sym.txt:java.compiler-8.sym.txt:java.corba-8.sym.txt:java.datatransfer-8.sym.txt:java.desktop-8.sym.txt:java.instrument-8.sym.txt:java.logging-8.sym.txt:java.management-8.sym.txt:java.management.rmi-8.sym.txt:java.naming-8.sym.txt:java.prefs-8.sym.txt:java.rmi-8.sym.txt:java.scripting-8.sym.txt:java.security.jgss-8.sym.txt:java.security.sasl-8.sym.txt:java.sql-8.sym.txt:java.sql.rowset-8.sym.txt:java.transaction-8.sym.txt:java.xml-8.sym.txt:java.xml.bind-8.sym.txt:java.xml.crypto-8.sym.txt:java.xml.ws-8.sym.txt:java.xml.ws.annotation-8.sym.txt:jdk.httpserver-8.sym.txt:jdk.management-8.sym.txt:jdk.scripting.nashorn-8.sym.txt:jdk.sctp-8.sym.txt:jdk.security.auth-8.sym.txt:jdk.security.jgss-8.sym.txt
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -63,45 +63,42 @@ mkdir -p $DEVKIT_ROOT
################################################################################
# Copy the relevant parts of Xcode.app, removing things that are both big and
# unecessary for our purposes, without building an impossibly long exclude list.
#
# Not including WatchSimulator.platform makes ibtool crashes in some situations.
# It doesn't seem to matter which extra platform is included, but that is the
# smallest one.

EXCLUDE_DIRS=" \
    Contents/_CodeSignature \
    $XCODE_APP_DIR_NAME/Contents/Applications \
    $XCODE_APP_DIR_NAME/Contents/Resources \
    $XCODE_APP_DIR_NAME/Contents/Library \
    $XCODE_APP_DIR_NAME/Contents/XPCServices \
    $XCODE_APP_DIR_NAME/Contents/OtherFrameworks \
    $XCODE_APP_DIR_NAME/Contents/Developer/Documentation \
    $XCODE_APP_DIR_NAME/Contents/Developer/usr/share \
    $XCODE_APP_DIR_NAME/Contents/Developer/usr/libexec/git-core \
    $XCODE_APP_DIR_NAME/Contents/Developer/usr/bin/git* \
    $XCODE_APP_DIR_NAME/Contents/Developer/usr/bin/svn* \
    $XCODE_APP_DIR_NAME/Contents/Developer/usr/lib/libgit* \
    $XCODE_APP_DIR_NAME/Contents/Developer/usr/lib/libsvn* \
    $XCODE_APP_DIR_NAME/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/share/man \
    $XCODE_APP_DIR_NAME/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/${SDK_VERSION}.sdk/usr/share/man \
    $XCODE_APP_DIR_NAME/Contents/Developer/Platforms/MacOSX.platform/Developer/usr/share/man \
    $XCODE_APP_DIR_NAME/Contents/Developer/Platforms/MacOSX.platform/usr \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/share/man \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/swift* \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift* \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/sourcekitd.framework \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/libexec/swift* \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/swift* \
    $XCODE_APP_DIR_NAME/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/arc \
    Contents/Applications \
    Contents/Resources \
    Contents/Library \
    Contents/XPCServices \
    Contents/OtherFrameworks \
    Contents/Developer/Documentation \
    Contents/Developer/usr/share \
    Contents/Developer/usr/libexec/git-core \
    Contents/Developer/usr/bin/git* \
    Contents/Developer/usr/bin/svn* \
    Contents/Developer/usr/lib/libgit* \
    Contents/Developer/usr/lib/libsvn* \
    Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/share/man \
    Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/${SDK_VERSION}.sdk/usr/share/man \
    Contents/Developer/Platforms/MacOSX.platform/Developer/usr/share/man \
    Contents/Developer/Platforms/MacOSX.platform/usr \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/share/man \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/swift* \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift* \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/sourcekitd.framework \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/libexec/swift* \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/swift* \
    Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/arc \
    Platforms/AppleTVSimulator.platform \
    Platforms/iPhoneSimulator.platform \
    $XCODE_APP_DIR_NAME/Contents/SharedFrameworks/LLDB.framework \
    $XCODE_APP_DIR_NAME/Contents/SharedFrameworks/ModelIO.framework \
    $XCODE_APP_DIR_NAME/Contents/SharedFrameworks/XCSUI.framework \
    $XCODE_APP_DIR_NAME/Contents/SharedFrameworks/SceneKit.framework \
    $XCODE_APP_DIR_NAME/Contents/SharedFrameworks/XCBuild.framework \
    $XCODE_APP_DIR_NAME/Contents/SharedFrameworks/GPUTools.framework \
    $(cd $XCODE_APP/.. && ls -d $XCODE_APP_DIR_NAME/Contents/Developer/Platforms/* \
    Platforms/WatchSimulator.platform \
    Contents/SharedFrameworks/LLDB.framework \
    Contents/SharedFrameworks/ModelIO.framework \
    Contents/SharedFrameworks/XCSUI.framework \
    Contents/SharedFrameworks/SceneKit.framework \
    Contents/SharedFrameworks/XCBuild.framework \
    Contents/SharedFrameworks/GPUTools*.framework \
    Contents/SharedFrameworks/DNTDocumentationSupport.framework/Versions/A/Resources/external \
    $(cd $XCODE_APP && ls -d Contents/Developer/Platforms/* \
        | grep -v MacOSX.platform | grep -v WatchSimulator.platform) \
    "

@@ -110,8 +107,8 @@ for ex in $EXCLUDE_DIRS; do
done

echo "Copying Xcode.app..."
-echo rsync -rlH $INCLUDE_ARGS $EXCLUDE_ARGS "$XCODE_APP" $DEVKIT_ROOT/
-rsync -rlH $INCLUDE_ARGS $EXCLUDE_ARGS "$XCODE_APP" $DEVKIT_ROOT/
+echo rsync -rlH $INCLUDE_ARGS $EXCLUDE_ARGS "$XCODE_APP/." $DEVKIT_ROOT/Xcode.app/
+rsync -rlH $INCLUDE_ARGS $EXCLUDE_ARGS "$XCODE_APP/." $DEVKIT_ROOT/Xcode.app/

################################################################################
@@ -277,6 +277,13 @@ else
  BUILD_LIBLCMS_INCLUDE_FILES :=
endif

+ifeq ($(TOOLCHAIN_TYPE), clang)
+  ifeq ($(TOOLCHAIN_VERSION), 10.1)
+    # Work around an optimizer bug seen with Xcode 10.1, but fixed by 10.3
+    BUILD_LIBLCMS_cmsopt.c_CFLAGS := -O0
+  endif
+endif
+
$(eval $(call SetupJdkLibrary, BUILD_LIBLCMS, \
    NAME := lcms, \
    INCLUDE_FILES := $(BUILD_LIBLCMS_INCLUDE_FILES), \
@@ -1,4 +1,4 @@
-# Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -388,7 +388,7 @@ OptionPaneDemo.confirmbutton=\u78BA\u8A8D\u30C0\u30A4\u30A2\u30ED\u30B0\u306E\u8
OptionPaneDemo.messagebutton=\u30E1\u30C3\u30BB\u30FC\u30B8\u30FB\u30C0\u30A4\u30A2\u30ED\u30B0\u306E\u8868\u793A

OptionPaneDemo.warningtitle=\u8B66\u544A\u30C0\u30A4\u30A2\u30ED\u30B0\u306E\u4F8B
-OptionPaneDemo.warningtext=<html><P><font color=black>\u3053\u308C\u306F<font color=red><b>\u7DCA\u6025\u653E\u9001\u30B7\u30B9\u30C6\u30E0</b></font>\u306E\u30C6\u30B9\u30C8\u3067\u3059\u3002<i><b>\u3053\u308C\u306F\u5358\u306A\u308B<br>\u30C6\u30B9\u30C8\u3067\u3059</b></i>\u3002\u30ED\u30FC\u30AB\u30EB\u30FB\u30A4\u30F3\u30C8\u30E9\u30CD\u30C3\u30C8\u306EWeb\u30DE\u30B9\u30BF\u30FC\u304C<br><font color=blue><b>\u9023\u90A6\u653F\u5E9C</b></font>\u304A\u3088\u3073<font color=blue><b>\u5DDE</b></font>\u5F53\u5C40\u3068\u81EA\u4E3B\u7684\u306B\u5354\u529B\u3057\u3001\u7DCA\u6025\u4E8B\u614B\u306E<br>\u767A\u751F\u6642\u306B\u901A\u5831\u3092\u884C\u3046\u305F\u3081\u306B\u3053\u306E\u30B7\u30B9\u30C6\u30E0\u3092\u958B\u767A\u3057\u307E\u3057\u305F\u3002<br>\u5B9F\u969B\u306E\u7DCA\u6025\u6642\u306B\u306F\u3001\u304A\u805E\u304D\u306B\u306A\u3063\u305F\u4FE1\u53F7\u306B\u7D9A\u3044\u3066\u5F53\u5C40\u304B\u3089\u306E<br>\u60C5\u5831\u3001\u30CB\u30E5\u30FC\u30B9\u307E\u305F\u306F\u6307\u793A\u304C\u901A\u77E5\u3055\u308C\u307E\u3059\u3002\u3053\u308C\u3067\u3001<br><font color=red><b>\u7DCA\u6025\u653E\u9001\u30B7\u30B9\u30C6\u30E0</b></font></font>\u306E\u30C6\u30B9\u30C8\u3092<br>\u7D42\u4E86\u3057\u307E\u3059\u3002</P><P><br>\u958B\u767A\u8005\u5411\u3051\u306E\u6CE8\u610F: \u3053\u306E\u30C0\u30A4\u30A2\u30ED\u30B0\u306E\u30C7\u30E2\u3067\u306F\u3001\u30C6\u30AD\u30B9\u30C8\u306E\u30D5\u30A9\u30FC\u30DE\u30C3\u30C8\u306BHTML\u304C\u4F7F\u7528\u3055\u308C\u3066\u3044\u307E\u3059\u3002</P></html>
+OptionPaneDemo.warningtext=<html><P><font color=black>\u3053\u308C\u306F<font color=red><b>\u7DCA\u6025\u653E\u9001\u30B7\u30B9\u30C6\u30E0</b></font>\u306E\u30C6\u30B9\u30C8\u3067\u3059\u3002<i><b>\u3053\u308C\u306F\u5358\u306A\u308B<br>\u30C6\u30B9\u30C8\u3067\u3059</b></i>\u3002\u30ED\u30FC\u30AB\u30EB\u30FB\u30A4\u30F3\u30C8\u30E9\u30CD\u30C3\u30C8\u306EWeb\u30DE\u30B9\u30BF\u30FC\u304C<br><font color=blue><b>\u9023\u90A6\u653F\u5E9C</b></font>\u304A\u3088\u3073<font color=blue><b>\u5DDE</b></font>\u5F53\u5C40\u3068\u81EA\u4E3B\u7684\u306B\u5354\u529B\u3057\u3001\u7DCA\u6025\u4E8B\u614B\u306E<br>\u767A\u751F\u6642\u306B\u901A\u5831\u3092\u884C\u3046\u305F\u3081\u306B\u3053\u306E\u30B7\u30B9\u30C6\u30E0\u3092\u958B\u767A\u3057\u307E\u3057\u305F\u3002<br>\u5B9F\u969B\u306E\u7DCA\u6025\u6642\u306B\u306F\u3001\u304A\u805E\u304D\u306B\u306A\u3063\u305F\u4FE1\u53F7\u306B\u7D9A\u3044\u3066\u5F53\u5C40\u304B\u3089\u306E<br>\u60C5\u5831\u3001\u30CB\u30E5\u30FC\u30B9\u307E\u305F\u306F\u6307\u793A\u304C\u901A\u77E5\u3055\u308C\u307E\u3059\u3002\u3053\u308C\u3067\u3001<br><font color=red><b>\u7DCA\u6025\u653E\u9001\u30B7\u30B9\u30C6\u30E0</b></font></font>\u306E\u30C6\u30B9\u30C8\u3092<br>\u7D42\u4E86\u3057\u307E\u3059\u3002</P><P><br>\u958B\u767A\u8005\u5411\u3051\u306E\u30CE\u30FC\u30C8: \u3053\u306E\u30C0\u30A4\u30A2\u30ED\u30B0\u306E\u30C7\u30E2\u3067\u306F\u3001\u30C6\u30AD\u30B9\u30C8\u306E\u30D5\u30A9\u30FC\u30DE\u30C3\u30C8\u306BHTML\u304C\u4F7F\u7528\u3055\u308C\u3066\u3044\u307E\u3059\u3002</P></html>

OptionPaneDemo.messagetext=\u74F6\u306B\u5165\u308C\u305F\u30E1\u30C3\u30BB\u30FC\u30B8
@@ -1,8 +1,8 @@
import random

-AARCH64_AS = "<PATH-TO-AS>"
-AARCH64_OBJDUMP = "<PATH-TO-OBJDUMP>"
-AARCH64_OBJCOPY = "<PATH-TO-OBJCOPY>"
+AARCH64_AS = "as"
+AARCH64_OBJDUMP = "objdump"
+AARCH64_OBJCOPY = "objcopy"

class Operand(object):

@@ -348,7 +348,7 @@ class LogicalImmOp(AddSubImmOp):
        + ', #0x%x' % self.immed)

    def cstr(self):
-       return super(AddSubImmOp, self).cstr() + "l);"
+       return super(AddSubImmOp, self).cstr() + "ll);"

class MultiOp():
(File diff suppressed because it is too large.)
@@ -1,4 +1,4 @@
-dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
+dnl Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,12 @@ dnl
dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
dnl and shift patterns patterns used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
dnl
define(`ORL2I', `ifelse($1,I,orL2I)')
dnl
define(`BASE_SHIFT_INSN',
-`
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, rFlagsReg cr) %{
@@ -46,9 +46,11 @@ instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
  %}

  ins_pipe(ialu_reg_reg_shift);
-%}')dnl
+%}
+')dnl
define(`BASE_INVERTED_INSN',
-`
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
                          rFlagsReg cr) %{
@@ -68,9 +70,11 @@ dnl into this canonical form.
  %}

  ins_pipe(ialu_reg_reg);
-%}')dnl
+%}
+')dnl
define(`INVERTED_SHIFT_INSN',
-`
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                             iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                             immI src3, imm$1_M1 src4, rFlagsReg cr) %{
@@ -91,9 +95,12 @@ dnl into this canonical form.
  %}

  ins_pipe(ialu_reg_reg_shift);
-%}')dnl
+%}
+')dnl
define(`NOT_INSN',
-`instruct reg$1_not_reg(iReg$1NoSp dst,
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct reg$1_not_reg(iReg$1NoSp dst,
                        iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                        rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
@@ -108,7 +115,8 @@ define(`NOT_INSN',
  %}

  ins_pipe(ialu_reg);
-%}')dnl
+%}
+')dnl
dnl
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
@@ -120,7 +128,7 @@ BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
-INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
+INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, jlong)')dnl
dnl
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
@@ -147,8 +155,10 @@ ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
-define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
-define(`BFM_INSN',`
+define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)') dnl
+define(`BFM_INSN',`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
@@ -167,7 +177,8 @@ instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rsh
  %}

  ins_pipe(ialu_reg_shift);
-%}')
+%}
+')
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
@@ -175,7 +186,9 @@ BFM_INSN(I, 31, URShift, ubfmw)
dnl
// Bitfield extract with shift & mask
define(`BFX_INSN',
-`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));
  // Make sure we are not going to exceed what $3 can do.
@@ -185,16 +198,20 @@ define(`BFX_INSN',
  format %{ "$3 $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & $4;
-   long mask = $mask$$constant;
+   intptr_t mask = $mask$$constant;
    int width = exact_log2$6(mask+1);
    __ $3(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
-%}')
+%}
+')
BFX_INSN(I, URShift, ubfxw, 31, int)
BFX_INSN(L, URShift, ubfx, 63, long, _long)

+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
// We can use ubfx when extending an And with a mask when we know mask
// is positive. We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
@@ -207,7 +224,7 @@ instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask m
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
-   long mask = $mask$$constant;
+   intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
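Note: the ubfx/ubfiz patterns above derive the bitfield width from a mask of
the form 2^n - 1 via exact_log2(mask+1); the hunks only widen the mask
variable from long to intptr_t so the arithmetic stays 64-bit on every host.
A standalone sketch of that width computation, substituting GCC/Clang's
__builtin_ctzll for HotSpot's exact_log2 (an assumption of this example):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Width of a contiguous low-bit mask (binary 0...011...1): mask + 1 is a
    // power of two, and the field width is its log2.
    static int mask_width(uint64_t mask) {
        assert(mask != 0 && ((mask + 1) & mask) == 0); // contiguous low bits
        return __builtin_ctzll(mask + 1);
    }

    int main() {
        std::printf("%d\n", mask_width(0xff));     // 8-bit field -> width 8
        std::printf("%d\n", mask_width(0xfffff));  // 20-bit field -> width 20
        return 0;
    }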
@@ -215,10 +232,12 @@ instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask m
  ins_pipe(ialu_reg_shift);
%}

-define(`UBFIZ_INSN',
+define(`UBFIZ_INSN', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because imm$1_bitmask guarantees it.
-`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
+instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
%{
  match(Set dst (LShift$1 (And$1 src mask) lshift));
  predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));
@@ -227,16 +246,20 @@ define(`UBFIZ_INSN',
  format %{ "$2 $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & $3;
-   long mask = $mask$$constant;
+   intptr_t mask = $mask$$constant;
    int width = exact_log2$5(mask+1);
    __ $2(as_Register($dst$$reg),
            as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
-%}')
+%}
+')
UBFIZ_INSN(I, ubfizw, 31, int)
UBFIZ_INSN(L, ubfiz, 63, long, _long)

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
@@ -247,7 +270,7 @@ instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
-   long mask = $mask$$constant;
+   intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
            as_Register($src$$reg), lshift, width);
@@ -255,10 +278,12 @@ instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask
  ins_pipe(ialu_reg_shift);
%}

-// Rotations
-
-define(`EXTRACT_INSN',
-`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
+// Rotations dnl
+define(`EXTRACT_INSN',`
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));
@@ -277,9 +302,10 @@ EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
-define(`ROL_EXPAND', `
-// $2 expander
+define(`ROL_EXPAND', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
+// $2 expander
instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);
@@ -292,10 +318,12 @@ instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
            rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
-%}')dnl
-define(`ROR_EXPAND', `
-// $2 expander
+%}
+')
+define(`ROR_EXPAND', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
+// $2 expander
instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);
@@ -307,8 +335,10 @@ instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
            as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
-%}')dnl
-define(ROL_INSN, `
+%}
+')dnl
+define(ROL_INSN, `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
@@ -316,8 +346,10 @@ instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2,
  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
-%}')dnl
-define(ROR_INSN, `
+%}
+')dnl
+define(ROR_INSN, `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
@@ -325,7 +357,8 @@ instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2,
  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
-%}')dnl
+%}
+')dnl
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
@@ -342,6 +375,8 @@ ROR_INSN(I, 0, ror)

// Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize
define(`ADD_SUB_CONV', `
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
@@ -354,10 +389,12 @@ instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2,
  %}
  ins_pipe(ialu_reg_reg);
%}')dnl
-ADD_SUB_CONV(I,L,Add,add,sxtw);
-ADD_SUB_CONV(I,L,Sub,sub,sxtw);
+ADD_SUB_CONV(I,L,Add,add,sxtw)
+ADD_SUB_CONV(I,L,Sub,sub,sxtw)
dnl
define(`ADD_SUB_EXTENDED', `
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
@@ -369,7 +406,7 @@ instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) sr
            as_Register($src2$$reg), ext::$6);
  %}
  ins_pipe(ialu_reg_reg);
-%}')
+%}')dnl
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
@@ -379,7 +416,8 @@ ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
-define(`ADD_SUB_ZERO_EXTEND', `
+define(`ADD_SUB_ZERO_EXTEND', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
@@ -391,7 +429,8 @@ instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1
            as_Register($src2$$reg), ext::$5);
  %}
  ins_pipe(ialu_reg_reg);
-%}')
+%}
+')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
@@ -406,7 +445,8 @@ ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
dnl
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
-define(`ADD_SUB_EXTENDED_SHIFT', `
+define(`ADD_SUB_EXTENDED_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
@@ -418,7 +458,8 @@ instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I(
            as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
-%}')
+%}
+')
dnl $1 $2 $3 $4 $5 $6 $7
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
@@ -435,7 +476,8 @@ ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
dnl
dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
-define(`ADD_SUB_CONV_SHIFT', `
+define(`ADD_SUB_CONV_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
@@ -447,13 +489,14 @@ instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, i
            as_Register($src2$$reg), ext::$4, ($lshift$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
-%}')
-dnl
-ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
-ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
+%}
+')dnl
+ADD_SUB_CONV_SHIFT(L,Add,add,sxtw)
+ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, ext type)
-define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
+define(`ADD_SUB_ZERO_EXTEND_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
@@ -465,8 +508,8 @@ instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'OR
            as_Register($src2$$reg), ext::$5, ($lshift$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
-%}')
-dnl
+%}
+')dnl
dnl $1 $2 $3 $4 $5
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
@@ -482,4 +525,4 @@ dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
dnl
// END This section of the file is automatically generated. Do not edit --------------
@@ -31,7 +31,7 @@
#include "interpreter/interpreter.hpp"

#ifndef PRODUCT
-const unsigned long Assembler::asm_bp = 0x00007fffee09ac88;
+const uintptr_t Assembler::asm_bp = 0x00007fffee09ac88;
#endif

#include "compiler/disassembler.hpp"
@@ -132,14 +132,14 @@ void entry(CodeBuffer *cb) {
  __ subs(r4, r1, 698u); // subs x4, x1, #698

// LogicalImmOp
-  __ andw(r28, r19, 4294709247ul); // and w28, w19, #0xfffc0fff
-  __ orrw(r27, r5, 536870910ul); // orr w27, w5, #0x1ffffffe
-  __ eorw(r30, r20, 4294840319ul); // eor w30, w20, #0xfffe0fff
-  __ andsw(r22, r26, 4294959615ul); // ands w22, w26, #0xffffe1ff
-  __ andr(r5, r7, 4194300ul); // and x5, x7, #0x3ffffc
-  __ orr(r13, r7, 18014398509481728ul); // orr x13, x7, #0x3fffffffffff00
-  __ eor(r7, r9, 18442240474082197503ul); // eor x7, x9, #0xfff0000000003fff
-  __ ands(r3, r0, 18374686479671656447ul); // ands x3, x0, #0xff00000000007fff
+  __ andw(r28, r19, 4294709247ull); // and w28, w19, #0xfffc0fff
+  __ orrw(r27, r5, 536870910ull); // orr w27, w5, #0x1ffffffe
+  __ eorw(r30, r20, 4294840319ull); // eor w30, w20, #0xfffe0fff
+  __ andsw(r22, r26, 4294959615ull); // ands w22, w26, #0xffffe1ff
+  __ andr(r5, r7, 4194300ull); // and x5, x7, #0x3ffffc
+  __ orr(r13, r7, 18014398509481728ull); // orr x13, x7, #0x3fffffffffff00
+  __ eor(r7, r9, 18442240474082197503ull); // eor x7, x9, #0xfff0000000003fff
+  __ ands(r3, r0, 18374686479671656447ull); // ands x3, x0, #0xff00000000007fff

// AbsOp
  __ b(__ pc()); // b .
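Note: the replacements above (unsigned long -> uintptr_t and the ul -> ull
literal suffixes) are LP64/LLP64 portability fixes: long is 64 bits on
Linux/macOS but only 32 bits on 64-bit Windows, while long long and the
<cstdint> fixed-width types are 64 bits everywhere. A small sketch of the
difference the types make (this only demonstrates the type widths, it is not
HotSpot code):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // On LLP64 targets (e.g. 64-bit Windows) sizeof(long) == 4, so the
        // same literal or variable can change type and overload resolution
        // between platforms unless a 64-bit type is requested explicitly.
        std::printf("sizeof(unsigned long)      = %zu\n", sizeof(unsigned long));
        std::printf("sizeof(unsigned long long) = %zu\n", sizeof(unsigned long long));
        std::printf("sizeof(uintptr_t)          = %zu\n", sizeof(uintptr_t));

        uint64_t imm = 18442240474082197503ull; // ull keeps the literal's type
                                                // identical on LP64 and LLP64
        std::printf("imm = %#llx\n", (unsigned long long)imm);
        return 0;
    }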
@@ -1493,7 +1493,7 @@ extern "C" {
    Disassembler::decode((address)start, (address)start + len);
  }

-  JNIEXPORT void das1(unsigned long insn) {
+  JNIEXPORT void das1(uintptr_t insn) {
    das(insn, 1);
  }
}
@@ -1532,7 +1532,7 @@ void Address::lea(MacroAssembler *as, Register r) const {
  }
}

-void Assembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
+void Assembler::adrp(Register reg1, const Address &dest, uintptr_t &byte_offset) {
  ShouldNotReachHere();
}

@@ -1541,7 +1541,7 @@ void Assembler::adrp(Register reg1, const Address &dest, unsigned long &byte_off
#define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use)

void Assembler::adr(Register Rd, address adr) {
-  long offset = adr - pc();
+  intptr_t offset = adr - pc();
  int offset_lo = offset & 3;
  offset >>= 2;
  starti;
@@ -1552,7 +1552,7 @@ void Assembler::adrp(Register reg1, const Address &dest, unsigned long &byte_off
void Assembler::_adrp(Register Rd, address adr) {
  uint64_t pc_page = (uint64_t)pc() >> 12;
  uint64_t adr_page = (uint64_t)adr >> 12;
-  long offset = adr_page - pc_page;
+  intptr_t offset = adr_page - pc_page;
  int offset_lo = offset & 3;
  offset >>= 2;
  starti;
@@ -1701,9 +1701,9 @@ void Assembler::add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int o
  srf(Rn, 5);
}

-bool Assembler::operand_valid_for_add_sub_immediate(long imm) {
+bool Assembler::operand_valid_for_add_sub_immediate(int64_t imm) {
  bool shift = false;
-  unsigned long uimm = uabs(imm);
+  uint64_t uimm = (uint64_t)uabs(imm);
  if (uimm < (1 << 12))
    return true;
  if (uimm < (1 << 24)
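Note: operand_valid_for_add_sub_immediate above is shown truncated; the
visible fragment checks the AArch64 encoding rule that an ADD/SUB immediate
is a 12-bit unsigned value, optionally shifted left by 12. A hedged
standalone version of that predicate, reconstructed from the fragment and
the architectural rule rather than copied from the file:

    #include <cstdint>
    #include <cstdio>

    // AArch64 ADD/SUB (immediate): uimm12, optionally LSL #12.
    static bool valid_add_sub_immediate(int64_t imm) {
        uint64_t uimm = imm < 0 ? 0 - (uint64_t)imm : (uint64_t)imm; // |imm|
        if (uimm < (1u << 12)) return true;               // plain imm12
        return uimm < (1u << 24) && (uimm & 0xfff) == 0;  // imm12 << 12
    }

    int main() {
        std::printf("%d %d %d\n",
                    valid_add_sub_immediate(698),              // 1: imm12
                    valid_add_sub_immediate(698 << 12),        // 1: shifted
                    valid_add_sub_immediate((698 << 12) | 1)); // 0: neither
        return 0;
    }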
@@ -199,7 +199,7 @@ public:
    return extend(uval, msb - lsb);
  }

-  static void patch(address a, int msb, int lsb, unsigned long val) {
+  static void patch(address a, int msb, int lsb, uint64_t val) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
@@ -212,9 +212,9 @@ public:
    *(unsigned *)a = target;
  }

-  static void spatch(address a, int msb, int lsb, long val) {
+  static void spatch(address a, int msb, int lsb, int64_t val) {
    int nbits = msb - lsb + 1;
-    long chk = val >> (nbits - 1);
+    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
@@ -245,9 +245,9 @@ public:
    f(val, bit, bit);
  }

-  void sf(long val, int msb, int lsb) {
+  void sf(int64_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
-    long chk = val >> (nbits - 1);
+    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
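Note: spatch and sf above write a signed value into an nbits-wide instruction
field; the guard chk == -1 || chk == 0 verifies that every bit above the sign
bit is a copy of it, i.e. that the value is representable in nbits signed
bits. The hunks only change the carrier type from long to int64_t. A
standalone sketch of that range check:

    #include <cstdint>
    #include <cstdio>

    // True iff val fits in an nbits-wide two's-complement field.
    static bool fits_signed(int64_t val, int nbits) {
        int64_t chk = val >> (nbits - 1); // arithmetic shift keeps the sign
        return chk == 0 || chk == -1;     // upper bits must all be sign copies
    }

    int main() {
        std::printf("%d\n", fits_signed(3, 3));  // 1: 011 fits in 3 bits
        std::printf("%d\n", fits_signed(4, 3));  // 0: needs 4 bits
        std::printf("%d\n", fits_signed(-4, 3)); // 1: 100 is -4 in 3 bits
        std::printf("%d\n", fits_signed(-5, 3)); // 0: out of range
        return 0;
    }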
@@ -357,7 +357,7 @@ class Address {
 private:
  Register _base;
  Register _index;
-  long _offset;
+  int64_t _offset;
  enum mode _mode;
  extend _ext;

@@ -380,9 +380,9 @@ class Address {
    : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
  Address(Register r, int o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
-  Address(Register r, long o)
+  Address(Register r, int64_t o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
-  Address(Register r, unsigned long o)
+  Address(Register r, uint64_t o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
#ifdef ASSERT
  Address(Register r, ByteSize disp)
@@ -422,7 +422,7 @@ class Address {
           "wrong mode");
    return _base;
  }
-  long offset() const {
+  int64_t offset() const {
    return _offset;
  }
  Register index() const {
@@ -554,7 +554,7 @@ class Address {

  void lea(MacroAssembler *, Register) const;

-  static bool offset_ok_for_immed(long offset, int shift) {
+  static bool offset_ok_for_immed(int64_t offset, int shift) {
    unsigned mask = (1 << shift) - 1;
    if (offset < 0 || offset & mask) {
      return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
@@ -616,10 +616,10 @@ typedef enum {
class Assembler : public AbstractAssembler {

#ifndef PRODUCT
-  static const unsigned long asm_bp;
+  static const uintptr_t asm_bp;

  void emit_long(jint x) {
-    if ((unsigned long)pc() == asm_bp)
+    if ((uintptr_t)pc() == asm_bp)
      asm volatile ("nop");
    AbstractAssembler::emit_int32(x);
  }
@@ -670,7 +670,7 @@ public:
  void f(unsigned val, int msb) {
    current->f(val, msb, msb);
  }
-  void sf(long val, int msb, int lsb) {
+  void sf(int64_t val, int msb, int lsb) {
    current->sf(val, msb, lsb);
  }
  void rf(Register reg, int lsb) {
@@ -720,7 +720,7 @@ public:
    wrap_label(Rd, L, &Assembler::_adrp);
  }

-  void adrp(Register Rd, const Address &dest, unsigned long &offset);
+  void adrp(Register Rd, const Address &dest, uint64_t &offset);

#undef INSN

@@ -846,7 +846,7 @@ public:
  // architecture. In debug mode we shrink it in order to test
  // trampolines, but not so small that branches in the interpreter
  // are out of range.
-  static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
+  static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);

  static bool reachable_from_branch_at(address branch, address target) {
    return uabs(target - branch) < branch_range;
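Note: reachable_from_branch_at above tests whether a direct AArch64 branch
can span the distance to its target; widening branch_range to uint64_t keeps
the comparison 64-bit on LLP64 hosts as well. A small sketch of the same
reachability test, with M spelled out and uabs replaced by a plain helper
(both assumptions of this example):

    #include <cstdint>

    using address = unsigned char*;
    static const uint64_t M = 1024 * 1024;
    static const uint64_t branch_range = 128 * M; // +/-128 MB direct branches

    static bool reachable_from_branch_at(address branch, address target) {
        int64_t dist = target - branch;
        uint64_t udist = dist < 0 ? 0 - (uint64_t)dist : (uint64_t)dist;
        return udist < branch_range;
    }

    int main() {
        static unsigned char code[16];
        // A branch within the same small buffer is trivially in range.
        return reachable_from_branch_at(code, code + 8) ? 0 : 1;
    }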
@@ -856,7 +856,7 @@
#define INSN(NAME, opcode) \
  void NAME(address dest) { \
    starti; \
-    long offset = (dest - pc()) >> 2; \
+    int64_t offset = (dest - pc()) >> 2; \
    DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
    f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \
  } \
@@ -873,7 +873,7 @@
  // Compare & branch (immediate)
#define INSN(NAME, opcode) \
  void NAME(Register Rt, address dest) { \
-    long offset = (dest - pc()) >> 2; \
+    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \
  } \
@@ -891,7 +891,7 @@
  // Test & branch (immediate)
#define INSN(NAME, opcode) \
  void NAME(Register Rt, int bitpos, address dest) { \
-    long offset = (dest - pc()) >> 2; \
+    int64_t offset = (dest - pc()) >> 2; \
    int b5 = bitpos >> 5; \
    bitpos &= 0x1f; \
    starti; \
@@ -912,7 +912,7 @@
    {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};

  void br(Condition cond, address dest) {
-    long offset = (dest - pc()) >> 2;
+    int64_t offset = (dest - pc()) >> 2;
    starti;
    f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
  }
@@ -1292,7 +1292,7 @@
  // Load register (literal)
#define INSN(NAME, opc, V) \
  void NAME(Register Rt, address dest) { \
-    long offset = (dest - pc()) >> 2; \
+    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
    sf(offset, 23, 5); \
@@ -1317,7 +1317,7 @@

#define INSN(NAME, opc, V) \
  void NAME(FloatRegister Rt, address dest) { \
-    long offset = (dest - pc()) >> 2; \
+    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
    sf(offset, 23, 5); \
@@ -1332,7 +1332,7 @@

#define INSN(NAME, opc, V) \
  void NAME(address dest, prfop op = PLDL1KEEP) { \
-    long offset = (dest - pc()) >> 2; \
+    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
    sf(offset, 23, 5); \
@@ -1408,7 +1408,7 @@
    assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
    assert(op == 0b01, "literal form can only be used with loads");
    f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
-    long offset = (adr.target() - pc()) >> 2;
+    int64_t offset = (adr.target() - pc()) >> 2;
    sf(offset, 23, 5);
    code_section()->relocate(pc(), adr.rspec());
    return;
@@ -2683,7 +2683,7 @@ void ext(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister V
  virtual void bang_stack_with_offset(int offset);

  static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
-  static bool operand_valid_for_add_sub_immediate(long imm);
+  static bool operand_valid_for_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_float_immediate(double imm);

  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -30,6 +30,7 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -1352,7 +1352,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
  __ load_klass(klass_RInfo, obj);
  if (k->is_loaded()) {
    // See if we get an immediate positive hit
-    __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
+    __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
    __ cmp(k_RInfo, rscratch1);
    if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
      __ br(Assembler::NE, *failure_target);
@@ -2016,7 +2016,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
  } else if (code == lir_cmp_l2i) {
    Label done;
    __ cmp(left->as_register_lo(), right->as_register_lo());
-    __ mov(dst->as_register(), (u_int64_t)-1L);
+    __ mov(dst->as_register(), (uint64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
@@ -2675,7 +2675,7 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
-  unsigned long offset;
+  uint64_t offset;
  __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  if (offset) __ add(res, res, offset);
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -653,14 +653,14 @@ intptr_t* frame::real_fp() const {
|
||||
|
||||
#define DESCRIBE_FP_OFFSET(name) \
|
||||
{ \
|
||||
unsigned long *p = (unsigned long *)fp; \
|
||||
printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \
|
||||
uintptr_t *p = (uintptr_t *)fp; \
|
||||
printf("0x%016lx 0x%016lx %s\n", (uintptr_t)(p + frame::name##_offset), \
|
||||
p[frame::name##_offset], #name); \
|
||||
}
|
||||
|
||||
static __thread unsigned long nextfp;
|
||||
static __thread unsigned long nextpc;
|
||||
static __thread unsigned long nextsp;
|
||||
static __thread uintptr_t nextfp;
|
||||
static __thread uintptr_t nextpc;
|
||||
static __thread uintptr_t nextsp;
|
||||
static __thread RegisterMap *reg_map;
|
||||
|
||||
static void printbc(Method *m, intptr_t bcx) {
|
||||
@ -679,7 +679,7 @@ static void printbc(Method *m, intptr_t bcx) {
|
||||
printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
|
||||
}
|
||||
|
||||
void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) {
|
||||
void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
|
||||
if (! fp)
|
||||
return;
|
||||
|
||||
@ -693,7 +693,7 @@ void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
|
||||
unsigned long *p = (unsigned long *)fp;
|
||||
uintptr_t *p = (uintptr_t *)fp;
|
||||
|
||||
// We want to see all frames, native and Java. For compiled and
|
||||
// interpreted frames we have special information that allows us to
|
||||
@ -703,16 +703,16 @@ void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned
|
||||
if (this_frame.is_compiled_frame() ||
|
||||
this_frame.is_interpreted_frame()) {
|
||||
frame sender = this_frame.sender(reg_map);
|
||||
nextfp = (unsigned long)sender.fp();
|
||||
nextpc = (unsigned long)sender.pc();
|
||||
nextsp = (unsigned long)sender.unextended_sp();
|
||||
nextfp = (uintptr_t)sender.fp();
|
||||
nextpc = (uintptr_t)sender.pc();
|
||||
nextsp = (uintptr_t)sender.unextended_sp();
|
||||
} else {
|
||||
nextfp = p[frame::link_offset];
|
||||
nextpc = p[frame::return_addr_offset];
|
||||
nextsp = (unsigned long)&p[frame::sender_sp_offset];
|
||||
nextsp = (uintptr_t)&p[frame::sender_sp_offset];
|
||||
}
|
||||
|
||||
if (bcx == -1ul)
|
||||
if (bcx == -1ULL)
|
||||
bcx = p[frame::interpreter_frame_bcp_offset];
|
||||
|
||||
if (Interpreter::contains((address)pc)) {
|
||||
@ -746,8 +746,8 @@ extern "C" void npf() {
|
||||
internal_pf (nextsp, nextfp, nextpc, -1);
|
||||
}
|
||||
|
||||
extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
|
||||
unsigned long bcx, unsigned long thread) {
|
||||
extern "C" void pf(uintptr_t sp, uintptr_t fp, uintptr_t pc,
|
||||
uintptr_t bcx, uintptr_t thread) {
|
||||
if (!reg_map) {
|
||||
reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtInternal);
|
||||
::new (reg_map) RegisterMap((JavaThread*)thread, false);
|
||||
@ -766,9 +766,9 @@ extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
|
||||
// support for printing out where we are in a Java method
|
||||
// needs to be passed current fp and bcp register values
|
||||
// prints method name, bc index and bytecode name
|
||||
extern "C" void pm(unsigned long fp, unsigned long bcx) {
|
||||
extern "C" void pm(uintptr_t fp, uintptr_t bcx) {
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_method);
|
||||
unsigned long *p = (unsigned long *)fp;
|
||||
uintptr_t *p = (uintptr_t *)fp;
|
||||
Method* m = (Method*)p[frame::interpreter_frame_method_offset];
|
||||
printbc(m, bcx);
|
||||
}
|
||||
|
||||
@ -178,7 +178,7 @@ void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
Label retry;
__ bind(retry);
{
unsigned long offset;
uint64_t offset;
__ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
__ ldr(heap_end, Address(rscratch1, offset));
}
@ -187,7 +187,7 @@ void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,

// Get the current top of the heap
{
unsigned long offset;
uint64_t offset;
__ adrp(rscratch1, heap_top, offset);
// Use add() here after ADRP, rather than lea().
// lea() does not generate anything if its offset is zero.

@ -48,7 +48,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
newval = tmp2;
}

ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, result);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*is_cae*/ false, result);
}

#undef __

@ -449,9 +449,64 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}


void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
bool acquire, bool release, bool weak, bool is_cae,
// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
// a) A parallel thread may mutate the contents of addr so that it points
// to a different object. In this case, the CAS operation should fail.
// b) A parallel thread may heal the contents of addr, replacing a
// from-space pointer held in addr with the to-space pointer
// representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default, this operation implements sequential consistency and the
// value held in the result register following execution of the
// generated code sequence is 0 to indicate failure of CAS, non-zero
// to indicate success. Arguments support variations on this theme:
//
// acquire: Allow relaxation of the memory ordering on CAS from
// sequential consistency. This can be useful when
// sequential consistency is not required, such as when
// another sequentially consistent operation is already
// present in the execution stream. If acquire, successful
// execution has the side effect of assuring that memory
// values updated by other threads and "released" will be
// visible to any read operations performed by this thread
// which follow this operation in program order. This is a
// special optimization that should not be enabled by default.
// release: Allow relaxation of the memory ordering on CAS from
// sequential consistency. This can be useful when
// sequential consistency is not required, such as when
// another sequentially consistent operation is already
// present in the execution stream. If release, successful
// completion of this operation has the side effect of
// assuring that all writes to memory performed by this
// thread that precede this operation in program order are
// visible to all other threads that subsequently "acquire"
// before reading the respective memory values. This is a
// special optimization that should not be enabled by default.
// is_cae: This turns CAS (compare and swap) into CAE (compare and
// exchange). The HotSpot convention is that CAE makes
// available to the caller the "failure witness", which is
// the value that was stored in memory which did not match
// the expected value. If is_cae, the result is the value
// most recently fetched from addr rather than a boolean
// success indicator.
//
// Clobbers rscratch1, rscratch2
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
Register addr,
Register expected,
Register new_val,
bool acquire, bool release,
bool is_cae,
Register result) {
Register tmp1 = rscratch1;
Register tmp2 = rscratch2;
@ -460,48 +515,124 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register a

assert_different_registers(addr, expected, new_val, tmp1, tmp2);

Label retry, done, fail;
Label step4, done;

// CAS, using LL/SC pair.
__ bind(retry);
__ load_exclusive(tmp1, addr, size, acquire);
if (is_narrow) {
__ cmpw(tmp1, expected);
} else {
__ cmp(tmp1, expected);
}
__ br(Assembler::NE, fail);
__ store_exclusive(tmp2, new_val, addr, size, release);
if (weak) {
__ cmpw(tmp2, 0u); // If the store fails, return NE to our caller
} else {
__ cbnzw(tmp2, retry);
}
__ b(done);
// There are two ways to reach this label. Initial entry into the
// cmpxchg_oop code expansion starts at step1 (which is equivalent
// to label step4). Additionally, in the rare case that four steps
// are required to perform the requested operation, the fourth step
// is the same as the first. On a second pass through step 1,
// control may flow through step 2 on its way to failure. It will
// not flow from step 2 to step 3 since we are assured that the
// memory at addr no longer holds a from-space pointer.
//
// The comments that immediately follow the step4 label apply only
// to the case in which control reaches this label by branch from
// step 3.

__ bind (step4);

// Step 4. CAS has failed because the value most recently fetched
// from addr (which is now held in tmp1) is no longer the from-space
// pointer held in tmp2. If a different thread replaced the
// in-memory value with its equivalent to-space pointer, then CAS
// may still be able to succeed. The value held in the expected
// register has not changed.
//
// It is extremely rare we reach this point. For this reason, the
// implementation opts for smaller rather than potentially faster
// code. Ultimately, smaller code for this rare case most likely
// delivers higher overall throughput by enabling improved icache
// performance.

// Step 1. Fast-path.
//
// Try to CAS with given arguments. If successful, then we are done.
//
// No label required for step 1.

__ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
// EQ flag set iff success. tmp2 holds value fetched.

// If expected equals null but tmp2 does not equal null, the
// following branches to done to report failure of CAS. If both
// expected and tmp2 equal null, the following branches to done to
// report success of CAS. There's no need for a special test of
// expected equal to null.

__ br(Assembler::EQ, done);
// if CAS failed, fall through to step 2

// Step 2. CAS has failed because the value held at addr does not
// match expected. This may be a false negative because the value fetched
// from addr (now held in tmp2) may be a from-space pointer to the
// original copy of same object referenced by to-space pointer expected.
//
// To resolve this, it suffices to find the forward pointer associated
// with fetched value. If this matches expected, retry CAS with new
// parameters. If this mismatches, then we have a legitimate
// failure, and we're done.
//
// No need for step2 label.

// overwrite tmp1 with from-space pointer fetched from memory
__ mov(tmp1, tmp2);

__ bind(fail);
// Check if rb(expected)==rb(tmp1)
// Shuffle registers so that we have memory value ready for next expected.
__ mov(tmp2, expected);
__ mov(expected, tmp1);
if (is_narrow) {
// Decode tmp1 in order to resolve its forward pointer
__ decode_heap_oop(tmp1, tmp1);
__ decode_heap_oop(tmp2, tmp2);
}
resolve_forward_pointer(masm, tmp1);
resolve_forward_pointer(masm, tmp2);
__ cmp(tmp1, tmp2);
// Retry with expected now being the value we just loaded from addr.
__ br(Assembler::EQ, retry);
if (is_cae && is_narrow) {
// For cmp-and-exchange and narrow oops, we need to restore
// the compressed old-value. We moved it to 'expected' a few lines up.
__ mov(tmp1, expected);
}
__ bind(done);
// Encode tmp1 to compare against expected.
__ encode_heap_oop(tmp1, tmp1);

// Does forwarded value of fetched from-space pointer match original
// value of expected? If tmp1 holds null, this comparison will fail
// because we know from step1 that expected is not null. There is
// no need for a separate test for tmp1 (the value originally held
// in memory) equal to null.
__ cmp(tmp1, expected);

// If not, then the failure was legitimate and we're done.
// Branching to done with NE condition denotes failure.
__ br(Assembler::NE, done);

// Fall through to step 3. No need for step3 label.

// Step 3. We've confirmed that the value originally held in memory
// (now held in tmp2) pointed to from-space version of original
// expected value. Try the CAS again with the from-space expected
// value. If it now succeeds, we're good.
//
// Note: tmp2 holds encoded from-space pointer that matches to-space
// object residing at expected. tmp2 is the new "expected".

// Note that macro implementation of __cmpxchg cannot use same register
// tmp2 for result and expected since it overwrites result before it
// compares result with expected.
__ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, tmp1);
// EQ flag set iff success. tmp1 holds value fetched.

// If fetched value did not equal the new expected, this could
// still be a false negative because some other thread may have
// newly overwritten the memory value with its to-space equivalent.
__ br(Assembler::NE, step4);

if (is_cae) {
__ mov(result, tmp1);
// We're falling through to done to indicate success. Success
// with is_cae is denoted by returning the value of expected as
// result.
__ mov(tmp2, expected);
}

__ bind(done);
// At entry to done, the Z (EQ) flag is on iff the CAS
// operation was successful. Additionally, if is_cae, tmp2 holds
// the value most recently fetched from addr. In this case, success
// is denoted by tmp2 matching expected.

if (is_cae) {
__ mov(result, tmp2);
} else {
__ cset(result, Assembler::EQ);
}
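The four-step scheme described above is easier to follow outside of generated assembly. Below is a plain C++ sketch of the same control flow, under stated assumptions: resolve_forward_pointer here stands in for Shenandoah's forwarding lookup, std::atomic replaces the emitted cmpxchg, and narrow-oop encoding is ignored. It illustrates the algorithm, not the emitted code:

#include <atomic>
#include <cstdint>

using oop = uintptr_t;

// Assumed stand-in for Shenandoah's forwarding lookup: returns the
// to-space copy of obj if it has been evacuated, else obj itself.
oop resolve_forward_pointer(oop obj);

// Control-flow sketch of the generated cmpxchg_oop sequence (CAS flavor).
bool shenandoah_cas_oop(std::atomic<oop>* addr, oop expected, oop new_val) {
  oop witness = expected;
  // Step 1: plain CAS; succeeds unless a race intervened.
  if (addr->compare_exchange_strong(witness, new_val)) return true;
  for (;;) {
    // Step 2: the failure may be false: the memory value may be the
    // from-space copy of the object that expected points to.
    if (resolve_forward_pointer(witness) != resolve_forward_pointer(expected))
      return false;                        // legitimate failure
    // Step 3: retry with the fetched (from-space) value as expected.
    oop fetched = witness;
    if (addr->compare_exchange_strong(fetched, new_val)) return true;
    // Step 4: memory changed again (perhaps healed); back to step 2.
    witness = fetched;
  }
}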

@ -84,7 +84,7 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
bool acquire, bool release, bool weak, bool is_cae, Register result);
bool acquire, bool release, bool is_cae, Register result);

virtual void barrier_stubs_init();
};

@ -33,7 +33,7 @@ encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}

enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
@ -42,7 +42,7 @@ encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
%}

@ -76,7 +76,7 @@ instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}

ins_pipe(pipe_slow);
@ -114,7 +114,7 @@ instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}

ins_pipe(pipe_slow);
@ -131,7 +131,7 @@ instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldva
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ true, $res$$Register);
/*acquire*/ false, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -147,7 +147,7 @@ instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldva
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ true, $res$$Register);
/*acquire*/ false, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -164,7 +164,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN ol
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ true, $res$$Register);
/*acquire*/ true, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -181,7 +181,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP ol
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ true, $res$$Register);
/*acquire*/ true, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -197,8 +197,9 @@ instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*weak*/ true, /*is_cae*/ false, $res$$Register);
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -213,8 +214,9 @@ instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*weak*/ true, /*is_cae*/ false, $res$$Register);
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -231,8 +233,9 @@ instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*weak*/ true, /*is_cae*/ false, $res$$Register);
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -249,8 +252,9 @@ instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*weak*/ true, /*is_cae*/ false, $res$$Register);
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}

@ -206,7 +206,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);

// The Address offset is too large for a direct load: -784. Our range is +127, -128.
__ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
__ mov(tmp, (int64_t)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
in_bytes(JavaThread::jni_environment_offset())));

// Load address bad mask

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/

#include <stdlib.h>
#include <stdint.h>
#include "immediate_aarch64.hpp"

// there are at most 2^13 possible logical immediate encodings
@ -34,14 +35,14 @@ static int li_table_entry_count;
// for forward lookup we just use a direct array lookup
// and assume that the client has supplied a valid encoding
// table[encoding] = immediate
static u_int64_t LITable[LI_TABLE_SIZE];
static uint64_t LITable[LI_TABLE_SIZE];

// for reverse lookup we need a sparse map so we store a table of
// immediate and encoding pairs sorted by immediate value

struct li_pair {
u_int64_t immediate;
u_int32_t encoding;
uint64_t immediate;
uint32_t encoding;
};

static struct li_pair InverseLITable[LI_TABLE_SIZE];
@ -63,9 +64,9 @@ int compare_immediate_pair(const void *i1, const void *i2)
// helper functions used by expandLogicalImmediate

// for i = 1, ..., N: result<i-1> = 1; other bits are zero
static inline u_int64_t ones(int N)
static inline uint64_t ones(int N)
{
return (N == 64 ? (u_int64_t)-1UL : ((1UL << N) - 1));
return (N == 64 ? -1ULL : (1ULL << N) - 1);
}

/*
@ -73,49 +74,49 @@ static inline u_int64_t ones(int N)
*/

// 32 bit mask with bits [hi,...,lo] set
static inline u_int32_t mask32(int hi = 31, int lo = 0)
static inline uint32_t mask32(int hi = 31, int lo = 0)
{
int nbits = (hi + 1) - lo;
return ((1 << nbits) - 1) << lo;
}

static inline u_int64_t mask64(int hi = 63, int lo = 0)
static inline uint64_t mask64(int hi = 63, int lo = 0)
{
int nbits = (hi + 1) - lo;
return ((1L << nbits) - 1) << lo;
}

// pick bits [hi,...,lo] from val
static inline u_int32_t pick32(u_int32_t val, int hi = 31, int lo = 0)
static inline uint32_t pick32(uint32_t val, int hi = 31, int lo = 0)
{
return (val & mask32(hi, lo));
}

// pick bits [hi,...,lo] from val
static inline u_int64_t pick64(u_int64_t val, int hi = 31, int lo = 0)
static inline uint64_t pick64(uint64_t val, int hi = 31, int lo = 0)
{
return (val & mask64(hi, lo));
}

// mask [hi,lo] and shift down to start at bit 0
static inline u_int32_t pickbits32(u_int32_t val, int hi = 31, int lo = 0)
static inline uint32_t pickbits32(uint32_t val, int hi = 31, int lo = 0)
{
return (pick32(val, hi, lo) >> lo);
}

// mask [hi,lo] and shift down to start at bit 0
static inline u_int64_t pickbits64(u_int64_t val, int hi = 63, int lo = 0)
static inline uint64_t pickbits64(uint64_t val, int hi = 63, int lo = 0)
{
return (pick64(val, hi, lo) >> lo);
}

// result<0> to val<N>
static inline u_int64_t pickbit(u_int64_t val, int N)
static inline uint64_t pickbit(uint64_t val, int N)
{
return pickbits64(val, N, N);
}

static inline u_int32_t uimm(u_int32_t val, int hi, int lo)
static inline uint32_t uimm(uint32_t val, int hi, int lo)
{
return pickbits32(val, hi, lo);
}
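A few concrete values make the bit-field helpers above easy to check at a glance. This is a self-contained sketch with local re-definitions, not the file's own code:

#include <cassert>
#include <cstdint>

// Local equivalents of mask64/pickbits64 above (valid for hi < 63),
// with a few checked examples of what they compute.
static uint64_t mask64(int hi, int lo) { return ((1ULL << (hi + 1 - lo)) - 1) << lo; }
static uint64_t pickbits64(uint64_t v, int hi, int lo) { return (v & mask64(hi, lo)) >> lo; }

int main() {
  assert(mask64(7, 4) == 0xf0);              // bits [7..4] set
  assert(pickbits64(0x1234, 7, 4) == 0x3);   // field extract: 0x1234 -> 0x3
  assert(pickbits64(0x8, 3, 3) == 1);        // single-bit pick (pickbit)
  return 0;
}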
@ -123,11 +124,11 @@ static inline u_int32_t uimm(u_int32_t val, int hi, int lo)
// SPEC bits(M*N) Replicate(bits(M) x, integer N);
// this is just an educated guess

u_int64_t replicate(u_int64_t bits, int nbits, int count)
uint64_t replicate(uint64_t bits, int nbits, int count)
{
u_int64_t result = 0;
uint64_t result = 0;
// nbits may be 64 in which case we want mask to be -1
u_int64_t mask = ones(nbits);
uint64_t mask = ones(nbits);
for (int i = 0; i < count ; i++) {
result <<= nbits;
result |= (bits & mask);
@ -140,24 +141,24 @@ u_int64_t replicate(u_int64_t bits, int nbits, int count)
// encoding must be treated as an UNALLOC instruction

// construct a 32 bit immediate value for a logical immediate operation
int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
u_int32_t imms, u_int64_t &bimm)
int expandLogicalImmediate(uint32_t immN, uint32_t immr,
uint32_t imms, uint64_t &bimm)
{
int len; // ought to be <= 6
u_int32_t levels; // 6 bits
u_int32_t tmask_and; // 6 bits
u_int32_t wmask_and; // 6 bits
u_int32_t tmask_or; // 6 bits
u_int32_t wmask_or; // 6 bits
u_int64_t imm64; // 64 bits
u_int64_t tmask, wmask; // 64 bits
u_int32_t S, R, diff; // 6 bits?
int len; // ought to be <= 6
uint32_t levels; // 6 bits
uint32_t tmask_and; // 6 bits
uint32_t wmask_and; // 6 bits
uint32_t tmask_or; // 6 bits
uint32_t wmask_or; // 6 bits
uint64_t imm64; // 64 bits
uint64_t tmask, wmask; // 64 bits
uint32_t S, R, diff; // 6 bits?

if (immN == 1) {
len = 6; // looks like 7 given the spec above but this cannot be!
} else {
len = 0;
u_int32_t val = (~imms & 0x3f);
uint32_t val = (~imms & 0x3f);
for (int i = 5; i > 0; i--) {
if (val & (1 << i)) {
len = i;
@ -170,7 +171,7 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
// for valid inputs leading 1s in immr must be less than leading
// zeros in imms
int len2 = 0; // ought to be < len
u_int32_t val2 = (~immr & 0x3f);
uint32_t val2 = (~immr & 0x3f);
for (int i = 5; i > 0; i--) {
if (!(val2 & (1 << i))) {
len2 = i;
@ -199,12 +200,12 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,

for (int i = 0; i < 6; i++) {
int nbits = 1 << i;
u_int64_t and_bit = pickbit(tmask_and, i);
u_int64_t or_bit = pickbit(tmask_or, i);
u_int64_t and_bits_sub = replicate(and_bit, 1, nbits);
u_int64_t or_bits_sub = replicate(or_bit, 1, nbits);
u_int64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits);
u_int64_t or_bits_top = (0 << nbits) | or_bits_sub;
uint64_t and_bit = pickbit(tmask_and, i);
uint64_t or_bit = pickbit(tmask_or, i);
uint64_t and_bits_sub = replicate(and_bit, 1, nbits);
uint64_t or_bits_sub = replicate(or_bit, 1, nbits);
uint64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits);
uint64_t or_bits_top = (0 << nbits) | or_bits_sub;

tmask = ((tmask
& (replicate(and_bits_top, 2 * nbits, 32 / nbits)))
@ -218,12 +219,12 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,

for (int i = 0; i < 6; i++) {
int nbits = 1 << i;
u_int64_t and_bit = pickbit(wmask_and, i);
u_int64_t or_bit = pickbit(wmask_or, i);
u_int64_t and_bits_sub = replicate(and_bit, 1, nbits);
u_int64_t or_bits_sub = replicate(or_bit, 1, nbits);
u_int64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub;
u_int64_t or_bits_top = (or_bits_sub << nbits) | 0;
uint64_t and_bit = pickbit(wmask_and, i);
uint64_t or_bit = pickbit(wmask_or, i);
uint64_t and_bits_sub = replicate(and_bit, 1, nbits);
uint64_t or_bits_sub = replicate(or_bit, 1, nbits);
uint64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub;
uint64_t or_bits_top = (or_bits_sub << nbits) | 0;

wmask = ((wmask
& (replicate(and_bits_top, 2 * nbits, 32 / nbits)))
@ -248,9 +249,9 @@ static void initLITables()
{
li_table_entry_count = 0;
for (unsigned index = 0; index < LI_TABLE_SIZE; index++) {
u_int32_t N = uimm(index, 12, 12);
u_int32_t immr = uimm(index, 11, 6);
u_int32_t imms = uimm(index, 5, 0);
uint32_t N = uimm(index, 12, 12);
uint32_t immr = uimm(index, 11, 6);
uint32_t imms = uimm(index, 5, 0);
if (expandLogicalImmediate(N, immr, imms, LITable[index])) {
InverseLITable[li_table_entry_count].immediate = LITable[index];
InverseLITable[li_table_entry_count].encoding = index;
@ -264,12 +265,12 @@ static void initLITables()

// public APIs provided for logical immediate lookup and reverse lookup

u_int64_t logical_immediate_for_encoding(u_int32_t encoding)
uint64_t logical_immediate_for_encoding(uint32_t encoding)
{
return LITable[encoding];
}

u_int32_t encoding_for_logical_immediate(u_int64_t immediate)
uint32_t encoding_for_logical_immediate(uint64_t immediate)
{
struct li_pair pair;
struct li_pair *result;
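The forward and reverse lookups are intended to be inverses over the valid encodings, which suggests a simple sanity check. An illustrative sketch, assuming the two public functions above are in scope and that an invalid (UNALLOC) encoding leaves a zero in the table; this check is not part of the file:

#include <cassert>
#include <cstdint>

// Round-trip check over the bimm64 table: every immediate produced by a
// valid 13-bit encoding should map back to that same encoding.
void check_logical_immediate_roundtrip() {
  for (uint32_t enc = 0; enc < (1u << 13); enc++) {
    uint64_t imm = logical_immediate_for_encoding(enc);
    if (imm != 0) {                    // assumed: 0 marks UNALLOC
      assert(encoding_for_logical_immediate(imm) == enc);
    }
  }
}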
@ -293,15 +294,15 @@ u_int32_t encoding_for_logical_immediate(u_int64_t immediate)
// fpimm[3:0] = fraction (assuming leading 1)
// i.e. F = s * 1.f * 2^(e - b)

u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp)
uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp)
{
union {
float fpval;
double dpval;
u_int64_t val;
uint64_t val;
};

u_int32_t s, e, f;
uint32_t s, e, f;
s = (imm8 >> 7 ) & 0x1;
e = (imm8 >> 4) & 0x7;
f = imm8 & 0xf;
@ -329,7 +330,7 @@ u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp)
return val;
}
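The imm8 split above (1 sign bit, 3 exponent bits, 4 fraction bits) is the AArch64 FMOV-immediate minifloat. One decode worked by hand (illustrative; the value follows the field extraction above):

#include <cstdint>

// Decode imm8 = 0x70 using the same field split:
//   s = (0x70 >> 7) & 1 = 0   -> positive
//   e = (0x70 >> 4) & 7 = 7   -> exponent field
//   f =  0x70 & 0xf     = 0   -> fraction, giving a 1.0 significand
// After the AArch64 exponent bias this is 1.0 * 2^0 = 1.0,
// i.e. the immediate of "fmov d0, #1.0".
const uint32_t imm8 = 0x70;
const uint32_t s = (imm8 >> 7) & 0x1;  // 0
const uint32_t e = (imm8 >> 4) & 0x7;  // 7
const uint32_t f = imm8 & 0xf;         // 0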

u_int32_t encoding_for_fp_immediate(float immediate)
uint32_t encoding_for_fp_immediate(float immediate)
{
// given a float which is of the form
//
@ -341,10 +342,10 @@ u_int32_t encoding_for_fp_immediate(float immediate)

union {
float fpval;
u_int32_t val;
uint32_t val;
};
fpval = immediate;
u_int32_t s, r, f, res;
uint32_t s, r, f, res;
// sign bit is 31
s = (val >> 31) & 0x1;
// exponent is bits 30-23 but we only want the bottom 3 bits

@ -46,9 +46,9 @@
* encoding then a map lookup will return 0xffffffff.
*/

u_int64_t logical_immediate_for_encoding(u_int32_t encoding);
u_int32_t encoding_for_logical_immediate(u_int64_t immediate);
u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp);
u_int32_t encoding_for_fp_immediate(float immediate);
uint64_t logical_immediate_for_encoding(uint32_t encoding);
uint32_t encoding_for_logical_immediate(uint64_t immediate);
uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp);
uint32_t encoding_for_fp_immediate(float immediate);

#endif // _IMMEDIATE_H

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -168,7 +168,7 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
}

void InterpreterMacroAssembler::get_dispatch() {
unsigned long offset;
uint64_t offset;
adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
lea(rdispatch, Address(rdispatch, offset));
}
@ -765,7 +765,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
// copy
mov(rscratch1, sp);
sub(swap_reg, swap_reg, rscratch1);
ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size()));
ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));

// Save the test result, for recursive case, the result is zero
str(swap_reg, Address(lock_reg, mark_offset));

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -346,7 +346,7 @@ class SlowSignatureHandler

if (_num_fp_args < Argument::n_float_register_parameters_c) {
*_fp_args++ = from_obj;
*_fp_identifiers |= (1 << _num_fp_args); // mark as double
*_fp_identifiers |= (1ull << _num_fp_args); // mark as double
_num_fp_args++;
} else {
*_to++ = from_obj;
@ -382,7 +382,7 @@ JRT_ENTRY(address,

// handle arguments
SlowSignatureHandler ssh(m, (address)from, to);
ssh.iterate(UCONST64(-1));
ssh.iterate((uint64_t)CONST64(-1));

// return result handler
return Interpreter::result_handler(m->result_type());

@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,7 +73,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {

Label slow;

unsigned long offset;
uint64_t offset;
__ adrp(rcounter_addr,
SafepointSynchronize::safepoint_counter_addr(), offset);
Address safepoint_counter_addr(rcounter_addr, offset);
@ -88,7 +88,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {

// Check to see if a field access watch has been set before we
// take the fast path.
unsigned long offset2;
uint64_t offset2;
__ adrp(result,
ExternalAddress((address) JvmtiExport::get_field_access_count_addr()),
offset2);

@ -70,8 +70,8 @@
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
int instructions = 1;
assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
long offset = (target - branch) >> 2;
assert((uint64_t)target < (1ull << 48), "48-bit overflow in address constant");
intptr_t offset = (target - branch) >> 2;
unsigned insn = *(unsigned*)branch;
if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
// Load register (literal)
@ -93,7 +93,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
offset = target-branch;
int shift = Instruction_aarch64::extract(insn, 31, 31);
if (shift) {
u_int64_t dest = (u_int64_t)target;
uint64_t dest = (uint64_t)target;
uint64_t pc_page = (uint64_t)branch >> 12;
uint64_t adr_page = (uint64_t)target >> 12;
unsigned offset_lo = dest & 0xfff;
@ -134,9 +134,9 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
Instruction_aarch64::extract(insn2, 4, 0)) {
// movk #imm16<<32
Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
long dest = ((long)target & 0xffffffffL) | ((long)branch & 0xffff00000000L);
long pc_page = (long)branch >> 12;
long adr_page = (long)dest >> 12;
uintptr_t dest = ((uintptr_t)target & 0xffffffffULL) | ((uintptr_t)branch & 0xffff00000000ULL);
uintptr_t pc_page = (uintptr_t)branch >> 12;
uintptr_t adr_page = (uintptr_t)dest >> 12;
offset = adr_page - pc_page;
instructions = 2;
}
@ -146,7 +146,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
Instruction_aarch64::spatch(branch, 23, 5, offset);
Instruction_aarch64::patch(branch, 30, 29, offset_lo);
} else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
u_int64_t dest = (u_int64_t)target;
uint64_t dest = (uint64_t)target;
// Move wide constant
assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
@ -205,7 +205,7 @@ int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
long offset = 0;
intptr_t offset = 0;
if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
// Load register (literal)
offset = Instruction_aarch64::sextract(insn, 23, 5);
@ -272,13 +272,13 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
ShouldNotReachHere();
}
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
u_int32_t *insns = (u_int32_t *)insn_addr;
uint32_t *insns = (uint32_t *)insn_addr;
// Move wide constant: movz, movk, movk. See movptr().
assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
+ (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
+ (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
return 0;
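The move-wide branch reverses movptr's movz/movk/movk sequence: each instruction carries 16 bits of a 48-bit address at bits 20..5 of the instruction word. A standalone sketch of the reassembly (illustrative only, mirroring the extracts above):

#include <cstdint>

// Rebuild a 48-bit address from the three 16-bit immediates of a
// movz/movk/movk sequence; each immediate sits at bits 20..5 of its
// instruction word.
uint64_t reassemble_movptr_target(const uint32_t insns[3]) {
  auto imm16 = [](uint32_t insn) -> uint64_t {
    return (insn >> 5) & 0xffff;      // extract(insn, 20, 5)
  };
  return imm16(insns[0])              // movz: bits 0..15
       | (imm16(insns[1]) << 16)      // movk #imm16, lsl 16
       | (imm16(insns[2]) << 32);     // movk #imm16, lsl 32
}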
@ -389,7 +389,7 @@ void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
assert(CodeCache::find_blob(entry.target()) != NULL,
"destination of far call not found in code cache");
if (far_branches()) {
unsigned long offset;
uintptr_t offset;
// We can use ADRP here because we know that the total size of
// the code cache cannot exceed 2Gb.
adrp(tmp, entry, offset);
@ -407,7 +407,7 @@ void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
assert(CodeCache::find_blob(entry.target()) != NULL,
"destination of far call not found in code cache");
if (far_branches()) {
unsigned long offset;
uintptr_t offset;
// We can use ADRP here because we know that the total size of
// the code cache cannot exceed 2Gb.
adrp(tmp, entry, offset);
@ -824,7 +824,7 @@ void MacroAssembler::c2bool(Register x) {
address MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
// address const_ptr = long_constant((jlong)Universe::non_oop_word());
// unsigned long offset;
// uintptr_t offset;
// ldr_constant(rscratch2, const_ptr);
movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
return trampoline_call(Address(entry, rh));
@ -1491,7 +1491,7 @@ void MacroAssembler::null_check(Register reg, int offset) {

void MacroAssembler::mov(Register r, Address dest) {
code_section()->relocate(pc(), dest.rspec());
u_int64_t imm64 = (u_int64_t)dest.target();
uint64_t imm64 = (uint64_t)dest.target();
movptr(r, imm64);
}

@ -1524,20 +1524,20 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
// imm32 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
// imm32 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
// T1D/T2D: invalid
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32) {
assert(T != T1D && T != T2D, "invalid arrangement");
if (T == T8B || T == T16B) {
assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
movi(Vd, T, imm32 & 0xff, 0);
return;
}
u_int32_t nimm32 = ~imm32;
uint32_t nimm32 = ~imm32;
if (T == T4H || T == T8H) {
assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
imm32 &= 0xffff;
nimm32 &= 0xffff;
}
u_int32_t x = imm32;
uint32_t x = imm32;
int movi_cnt = 0;
int movn_cnt = 0;
while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
@ -1561,7 +1561,7 @@ void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32)
}
}

void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
{
#ifndef PRODUCT
{
@ -1575,7 +1575,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
} else {
// we can use a combination of MOVZ or MOVN with
// MOVK to build up the constant
u_int64_t imm_h[4];
uint64_t imm_h[4];
int zero_count = 0;
int neg_count = 0;
int i;
@ -1596,7 +1596,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
} else if (zero_count == 3) {
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0L) {
movz(dst, (u_int32_t)imm_h[i], (i << 4));
movz(dst, (uint32_t)imm_h[i], (i << 4));
break;
}
}
@ -1604,7 +1604,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
// one MOVN will do
for (int i = 0; i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
break;
}
}
@ -1612,69 +1612,69 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
// one MOVZ and one MOVK will do
for (i = 0; i < 3; i++) {
if (imm_h[i] != 0L) {
movz(dst, (u_int32_t)imm_h[i], (i << 4));
movz(dst, (uint32_t)imm_h[i], (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0L) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else if (neg_count == 2) {
// one MOVN and one MOVK will do
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else if (zero_count == 1) {
// one MOVZ and two MOVKs will do
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0L) {
movz(dst, (u_int32_t)imm_h[i], (i << 4));
movz(dst, (uint32_t)imm_h[i], (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0x0L) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else if (neg_count == 1) {
// one MOVN and two MOVKs will do
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else {
// use a MOVZ and 3 MOVKs (makes it easier to debug)
movz(dst, (u_int32_t)imm_h[0], 0);
movz(dst, (uint32_t)imm_h[0], 0);
for (i = 1; i < 4; i++) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
}
}
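The case analysis in mov_immediate64 is driven by two counts over the four 16-bit halves of the constant: all-zero halves favor MOVZ plus MOVKs, all-one halves favor MOVN plus MOVKs. A compact sketch of that selection input (illustrative; mirrors zero_count and neg_count above):

#include <cstdint>

// Count the 16-bit halves of imm64 that are all-zero and all-one; the
// emitter picks MOVZ when zeros dominate and MOVN when ones dominate,
// then patches the remaining halves with MOVK.
void count_halfwords(uint64_t imm64, int& zero_count, int& neg_count) {
  zero_count = neg_count = 0;
  for (int i = 0; i < 4; i++) {
    uint64_t h = (imm64 >> (i * 16)) & 0xffff;
    if (h == 0)           zero_count++;
    else if (h == 0xffff) neg_count++;
  }
  // zero_count == 4: the constant is 0, a single MOVZ suffices.
  // zero_count == 3 / neg_count == 3: one MOVZ / one MOVN.
  // Otherwise: one MOVZ or MOVN plus a MOVK per remaining half.
}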

void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
{
#ifndef PRODUCT
{
@ -1688,7 +1688,7 @@ void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
} else {
// we can use MOVZ, MOVN or two calls to MOVK to build up the
// constant
u_int32_t imm_h[2];
uint32_t imm_h[2];
imm_h[0] = imm32 & 0xffff;
imm_h[1] = ((imm32 >> 16) & 0xffff);
if (imm_h[0] == 0) {
@ -1711,7 +1711,7 @@ void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
// not actually be used: you must use the Address that is returned.
// It is up to you to ensure that the shift provided matches the size
// of your data.
Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
if (Address::offset_ok_for_immed(byte_offset, shift))
// It fits; no need for any heroics
return Address(base, byte_offset);
@ -1726,8 +1726,8 @@ Address MacroAssembler::form_address(Register Rd, Register base, long byte_offse

// See if we can do this with two 12-bit offsets
{
unsigned long word_offset = byte_offset >> shift;
unsigned long masked_offset = word_offset & 0xfff000;
uint64_t word_offset = byte_offset >> shift;
uint64_t masked_offset = word_offset & 0xfff000;
if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
&& Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
add(Rd, base, masked_offset << shift);
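The two-12-bit trick in form_address peels the upper 12 bits of the word offset into an add (whose shifted-immediate form accepts imm12 shifted left by 12) and leaves the rest for the load/store's unsigned-offset field. A worked example of the arithmetic (illustrative values only):

#include <cstdint>

// byte_offset = 0x23450, shift = 3 (8-byte elements):
//   word_offset   = 0x23450 >> 3      = 0x468a
//   masked_offset = 0x468a & 0xfff000 = 0x4000  -> materialized by add()
//   remainder     = 0x468a - 0x4000   = 0x68a   -> fits the imm12 field
const int64_t  byte_offset   = 0x23450;
const int      shift         = 3;
const uint64_t word_offset   = byte_offset >> shift;        // 0x468a
const uint64_t masked_offset = word_offset & 0xfff000;      // 0x4000
const uint64_t remainder     = word_offset - masked_offset; // 0x68a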
@ -1968,7 +1968,7 @@ void MacroAssembler::decrement(Register reg, int value)
if (value < (1 << 12)) { sub(reg, reg, value); return; }
/* else */ {
assert(reg != rscratch2, "invalid dst for register decrement");
mov(rscratch2, (unsigned long)value);
mov(rscratch2, (uint64_t)value);
sub(reg, reg, rscratch2);
}
}
@ -2720,19 +2720,19 @@ Address MacroAssembler::spill_address(int size, int offset, Register tmp)
// Returns true if it is, else false.
bool MacroAssembler::merge_alignment_check(Register base,
size_t size,
long cur_offset,
long prev_offset) const {
int64_t cur_offset,
int64_t prev_offset) const {
if (AvoidUnalignedAccesses) {
if (base == sp) {
// Checks whether the low offset is aligned to a pair of registers.
long pair_mask = size * 2 - 1;
long offset = prev_offset > cur_offset ? cur_offset : prev_offset;
int64_t pair_mask = size * 2 - 1;
int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
return (offset & pair_mask) == 0;
} else { // If base is not sp, we can't guarantee the access is aligned.
return false;
}
} else {
long mask = size - 1;
int64_t mask = size - 1;
// Load/store pair instruction only supports element size aligned offset.
return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
}
@ -2765,8 +2765,8 @@ bool MacroAssembler::ldst_can_merge(Register rt,
return false;
}

long max_offset = 63 * prev_size_in_bytes;
long min_offset = -64 * prev_size_in_bytes;
int64_t max_offset = 63 * prev_size_in_bytes;
int64_t min_offset = -64 * prev_size_in_bytes;

assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");

@ -2775,8 +2775,8 @@ bool MacroAssembler::ldst_can_merge(Register rt,
return false;
}

long cur_offset = adr.offset();
long prev_offset = prev_ldst->offset();
int64_t cur_offset = adr.offset();
int64_t prev_offset = prev_ldst->offset();
size_t diff = abs(cur_offset - prev_offset);
if (diff != prev_size_in_bytes) {
return false;
@ -2793,7 +2793,7 @@ bool MacroAssembler::ldst_can_merge(Register rt,
return false;
}

long low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
// Offset range must be in ldp/stp instruction's range.
if (low_offset > max_offset || low_offset < min_offset) {
return false;
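The ldp/stp merge window comes from the pair instructions' signed 7-bit scaled immediate: the lower of the two offsets must land within -64 to +63 element sizes. A small sketch of that bound (illustrative; mirrors max_offset and min_offset above):

#include <cstdint>

// Can two adjacent element-sized accesses be merged into ldp/stp?
// The pair immediate is a signed 7-bit count of elements, so the lower
// offset must fit in [-64 * size, 63 * size].
bool fits_pair_window(int64_t cur_offset, int64_t prev_offset, int64_t size) {
  int64_t low = prev_offset > cur_offset ? cur_offset : prev_offset;
  return low >= -64 * size && low <= 63 * size;
}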
|
||||
@ -2818,7 +2818,7 @@ void MacroAssembler::merge_ldst(Register rt,
address prev = pc() - NativeInstruction::instruction_size;
NativeLdSt* prev_ldst = NativeLdSt_at(prev);

long offset;
int64_t offset;

if (adr.offset() < prev_ldst->offset()) {
offset = adr.offset();
@ -3364,7 +3364,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
Register table0, Register table1, Register table2, Register table3,
Register tmp, Register tmp2, Register tmp3) {
Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
unsigned long offset;
uint64_t offset;

if (UseCRC32) {
kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
@ -3666,7 +3666,7 @@ void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
SkipIfEqual::SkipIfEqual(
MacroAssembler* masm, const bool* flag_addr, bool value) {
_masm = masm;
unsigned long offset;
uint64_t offset;
_masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
_masm->ldrb(rscratch1, Address(rscratch1, offset));
_masm->cbzw(rscratch1, _label);
@ -3695,7 +3695,7 @@ void MacroAssembler::addptr(const Address &dst, int32_t src) {
}

void MacroAssembler::cmpptr(Register src1, Address src2) {
unsigned long offset;
uint64_t offset;
adrp(rscratch1, src2, offset);
ldr(rscratch1, Address(rscratch1, offset));
cmp(src1, rscratch1);
@ -3951,7 +3951,7 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
if (operand_valid_for_logical_immediate(
/*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
const uint64_t range_mask =
(1UL << log2_intptr(CompressedKlassPointers::range())) - 1;
(1ULL << log2_intptr(CompressedKlassPointers::range())) - 1;
if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
return (_klass_decode_mode = KlassDecodeXor);
}
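For context, KlassDecodeXor is only selected above when the compressed-class base has no bits inside range_mask; in that case OR, ADD and XOR with the base all coincide, so decoding needs a single eor. A hedged illustration of why that holds (standalone, not HotSpot code):

#include <cstdint>
#include <cassert>

// When (base & range_mask) == 0, every narrow value n <= range_mask
// satisfies base ^ n == base | n == base + n, so XOR both encodes and
// decodes. The names below are illustrative only.
static uint64_t xor_decode(uint64_t base, uint32_t narrow, uint64_t range_mask) {
  assert((base & range_mask) == 0 && narrow <= range_mask);  // precondition from the diff
  return base ^ narrow;   // same result as base + narrow here
}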
@ -4357,13 +4357,13 @@ address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype
return inst_mark();
}

void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
relocInfo::relocType rtype = dest.rspec().reloc()->type();
unsigned long low_page = (unsigned long)CodeCache::low_bound() >> 12;
unsigned long high_page = (unsigned long)(CodeCache::high_bound()-1) >> 12;
unsigned long dest_page = (unsigned long)dest.target() >> 12;
long offset_low = dest_page - low_page;
long offset_high = dest_page - high_page;
uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
uint64_t dest_page = (uint64_t)dest.target() >> 12;
int64_t offset_low = dest_page - low_page;
int64_t offset_high = dest_page - high_page;

assert(is_valid_AArch64_address(dest.target()), "bad address");
assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
@ -4375,14 +4375,14 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byt
if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
_adrp(reg1, dest.target());
} else {
unsigned long target = (unsigned long)dest.target();
unsigned long adrp_target
= (target & 0xffffffffUL) | ((unsigned long)pc() & 0xffff00000000UL);
uint64_t target = (uint64_t)dest.target();
uint64_t adrp_target
= (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

_adrp(reg1, (address)adrp_target);
movk(reg1, target >> 32, 32);
}
byte_offset = (unsigned long)dest.target() & 0xfff;
byte_offset = (uint64_t)dest.target() & 0xfff;
}

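The rewritten adrp reasons in 4 KiB pages: ADRP encodes a signed 21-bit page offset, roughly +/-4 GiB. A standalone sketch of the reachability decision and the fallback, assuming that encoding; illustrative names, not the HotSpot code:

#include <cstdint>

// True if a single ADRP at pc can reach dest: the page delta must fit
// the signed 21-bit ADRP immediate.
static bool adrp_reachable(uint64_t pc, uint64_t dest) {
  int64_t page_delta = (int64_t)(dest >> 12) - (int64_t)(pc >> 12);
  return page_delta >= -(1 << 20) && page_delta < (1 << 20);
}
// Otherwise the diff splices dest's low 32 bits under pc's bits 32-47
// (always ADRP-reachable from pc) and patches the high half with movk:
//   adrp_target = (dest & 0xffffffff) | (pc & 0xffff00000000)
//   movk(reg, dest >> 32, 32)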
void MacroAssembler::load_byte_map_base(Register reg) {
@ -4392,7 +4392,7 @@ void MacroAssembler::load_byte_map_base(Register reg) {
if (is_valid_AArch64_address((address)byte_map_base)) {
// Strictly speaking the byte_map_base isn't an address at all,
// and it might even be negative.
unsigned long offset;
uint64_t offset;
adrp(reg, ExternalAddress((address)byte_map_base), offset);
// We expect offset to be zero with most collectors.
if (offset != 0) {
@ -4835,7 +4835,7 @@ void MacroAssembler::zero_words(Register ptr, Register cnt)
// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Immediate count in HeapWords.
#define SmallArraySize (18 * BytesPerLong)
void MacroAssembler::zero_words(Register base, u_int64_t cnt)
void MacroAssembler::zero_words(Register base, uint64_t cnt)
{
BLOCK_COMMENT("zero_words {");
int i = cnt & 1; // store any odd word to start

@ -456,8 +456,8 @@ class MacroAssembler: public Assembler {
// first two private routines for loading 32 bit or 64 bit constants
private:

void mov_immediate64(Register dst, u_int64_t imm64);
void mov_immediate32(Register dst, u_int32_t imm32);
void mov_immediate64(Register dst, uint64_t imm64);
void mov_immediate32(Register dst, uint32_t imm32);

int push(unsigned int bitset, Register stack);
int pop(unsigned int bitset, Register stack);
@ -486,27 +486,27 @@ public:

inline void mov(Register dst, address addr)
{
mov_immediate64(dst, (u_int64_t)addr);
mov_immediate64(dst, (uint64_t)addr);
}

inline void mov(Register dst, u_int64_t imm64)
inline void mov(Register dst, uint64_t imm64)
{
mov_immediate64(dst, imm64);
}

inline void movw(Register dst, u_int32_t imm32)
inline void movw(Register dst, uint32_t imm32)
{
mov_immediate32(dst, imm32);
}

inline void mov(Register dst, long l)
inline void mov(Register dst, int64_t l)
{
mov(dst, (u_int64_t)l);
mov(dst, (uint64_t)l);
}

inline void mov(Register dst, int i)
{
mov(dst, (long)i);
mov(dst, (int64_t)i);
}

void mov(Register dst, RegisterOrConstant src) {
@ -518,7 +518,7 @@ public:

void movptr(Register r, uintptr_t imm64);

void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);
void mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32);

void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
orr(Vd, T, Vn, Vn);
@ -1170,7 +1170,7 @@ public:
void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
void subw(Register Rd, Register Rn, RegisterOrConstant decrement);

void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);
void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);

void tableswitch(Register index, jint lowbound, jint highbound,
Label &jumptable, Label &jumptable_end, int stride = 1) {
@ -1187,7 +1187,7 @@ public:
// actually be used: you must use the Address that is returned. It
// is up to you to ensure that the shift provided matches the size
// of your data.
Address form_address(Register Rd, Register base, long byte_offset, int shift);
Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);

// Return true iff an address is within the 48-bit AArch64 address
// space.
@ -1212,7 +1212,7 @@ public:
if (NearCpool) {
ldr(dest, const_addr);
} else {
unsigned long offset;
uint64_t offset;
adrp(dest, InternalAddress(const_addr.target()), offset);
ldr(dest, Address(dest, offset));
}
@ -1237,7 +1237,7 @@ public:
int elem_size);

void fill_words(Register base, Register cnt, Register value);
void zero_words(Register base, u_int64_t cnt);
void zero_words(Register base, uint64_t cnt);
void zero_words(Register ptr, Register cnt);
void zero_dcache_blocks(Register base, Register cnt);

@ -1310,7 +1310,7 @@ private:
// Uses rscratch2 if the address is not directly reachable
Address spill_address(int size, int offset, Register tmp=rscratch2);

bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;
bool merge_alignment_check(Register base, size_t size, int64_t cur_offset, int64_t prev_offset) const;

// Check whether two loads/stores can be merged into ldp/stp.
bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;

@ -260,9 +260,9 @@ void MacroAssembler::fast_log(FloatRegister vtmp0, FloatRegister vtmp1,
Register tmp4, Register tmp5) {
Label DONE, CHECK_CORNER_CASES, SMALL_VALUE, MAIN,
CHECKED_CORNER_CASES, RETURN_MINF_OR_NAN;
const long INF_OR_NAN_PREFIX = 0x7FF0;
const long MINF_OR_MNAN_PREFIX = 0xFFF0;
const long ONE_PREFIX = 0x3FF0;
const int64_t INF_OR_NAN_PREFIX = 0x7FF0;
const int64_t MINF_OR_MNAN_PREFIX = 0xFFF0;
const int64_t ONE_PREFIX = 0x3FF0;
movz(tmp2, ONE_PREFIX, 48);
movz(tmp4, 0x0010, 48);
fmovd(rscratch1, v0); // rscratch1 = AS_LONG_BITS(X)

@ -201,9 +201,9 @@
// NOTE: fpu registers are actively reused. See comments in code about their usage
void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw,
address two_over_pi, address pio2) {
const long PIO2_1t = 0x3DD0B4611A626331UL;
const long PIO2_2 = 0x3DD0B4611A600000UL;
const long PIO2_2t = 0x3BA3198A2E037073UL;
const int64_t PIO2_1t = 0x3DD0B4611A626331ULL;
const int64_t PIO2_2 = 0x3DD0B4611A600000ULL;
const int64_t PIO2_2t = 0x3BA3198A2E037073ULL;
Label X_IS_NEGATIVE, X_IS_MEDIUM_OR_LARGE, X_IS_POSITIVE_LONG_PI, LARGE_ELSE,
REDUCTION_DONE, X_IS_MEDIUM_BRANCH_DONE, X_IS_LARGE, NX_SET,
X_IS_NEGATIVE_LONG_PI;

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
@ -304,7 +305,7 @@ void NativeMovRegMem::set_offset(int x) {
unsigned insn = *(unsigned*)pc;
if (maybe_cpool_ref(pc)) {
address addr = MacroAssembler::target_addr_for_insn(pc);
*(long*)addr = x;
*(int64_t*)addr = x;
} else {
MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
ICache::invalidate_range(instruction_address(), instruction_size);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ class RegisterImpl: public AbstractRegisterImpl {

// Return the bit which represents this register. This is intended
// to be ORed into a bitmask: for usage see class RegSet below.
unsigned long bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }
uint64_t bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }
};

// The integer registers of the aarch64 architecture

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,6 +25,7 @@

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledMethod.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"

@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
@ -402,7 +403,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// 3 8 T_BOOL
// - 0 return address
//
// However to make thing extra confusing. Because we can fit a long/double in
// However to make thing extra confusing. Because we can fit a Java long/double in
// a single slot on a 64 bit vm and it would be silly to break them up, the interpreter
// leaves one slot empty and only stores to a single slot. In this case the
// slot that is occupied is the T_VOID slot. See I said it was confusing.
@ -435,7 +436,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ mov(rscratch1, 0xdeadffffdeadaaaaul);
__ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
__ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
} else {
@ -452,10 +453,10 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
// T_DOUBLE and T_LONG use two slots in the interpreter
if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
// long/double in gpr
// jlong/double in gpr
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ mov(rscratch1, 0xdeadffffdeadaaabul);
__ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
__ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
__ str(r, Address(sp, next_off));
@ -471,7 +472,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
} else {
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ mov(rscratch1, 0xdeadffffdeadaaacul);
__ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
__ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
__ strd(r_1->as_FloatRegister(), Address(sp, next_off));
@ -1700,7 +1701,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

Label dtrace_method_entry, dtrace_method_entry_done;
{
unsigned long offset;
uint64_t offset;
__ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
__ ldrb(rscratch1, Address(rscratch1, offset));
__ cbnzw(rscratch1, dtrace_method_entry);
@ -1914,7 +1915,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

Label dtrace_method_exit, dtrace_method_exit_done;
{
unsigned long offset;
uint64_t offset;
__ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
__ ldrb(rscratch1, Address(rscratch1, offset));
__ cbnzw(rscratch1, dtrace_method_exit);

@ -3283,8 +3283,8 @@ class StubGenerator: public StubCodeGenerator {

// Max number of bytes we can process before having to take the mod
// 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
unsigned long BASE = 0xfff1;
unsigned long NMAX = 0x15B0;
uint64_t BASE = 0xfff1;
uint64_t NMAX = 0x15B0;

__ mov(base, BASE);
__ mov(nmax, NMAX);
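The NMAX comment above carries a small arithmetic claim: 5552 is the largest n for which the worst-case unreduced Adler-32 sum still fits in 32 bits. A quick standalone check of that bound (not HotSpot code):

#include <cstdint>
#include <cstdio>

int main() {
  // Worst case: n bytes of 0xff on top of sums already at BASE-1.
  const uint64_t BASE = 0xfff1;                  // 65521, largest prime < 2^16
  uint64_t n = 5552;                             // 0x15B0
  uint64_t worst      = 255 * n * (n + 1) / 2 + (n + 1) * (BASE - 1);
  uint64_t worst_next = 255 * (n + 1) * (n + 2) / 2 + (n + 2) * (BASE - 1);
  std::printf("n=5552 fits: %d, n=5553 fits: %d\n",
              worst <= 0xffffffffULL, worst_next <= 0xffffffffULL);
  // prints: n=5552 fits: 1, n=5553 fits: 0
}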
@ -5381,12 +5381,12 @@ class StubGenerator: public StubCodeGenerator {
// In C, approximately:

// void
// montgomery_multiply(unsigned long Pa_base[], unsigned long Pb_base[],
// unsigned long Pn_base[], unsigned long Pm_base[],
// unsigned long inv, int len) {
// unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
// unsigned long *Pa, *Pb, *Pn, *Pm;
// unsigned long Ra, Rb, Rn, Rm;
// montgomery_multiply(julong Pa_base[], julong Pb_base[],
// julong Pn_base[], julong Pm_base[],
// julong inv, int len) {
// julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
// julong *Pa, *Pb, *Pn, *Pm;
// julong Ra, Rb, Rn, Rm;

// int i;

@ -5594,11 +5594,11 @@ class StubGenerator: public StubCodeGenerator {
// In C, approximately:

// void
// montgomery_square(unsigned long Pa_base[], unsigned long Pn_base[],
// unsigned long Pm_base[], unsigned long inv, int len) {
// unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
// unsigned long *Pa, *Pb, *Pn, *Pm;
// unsigned long Ra, Rb, Rn, Rm;
// montgomery_square(julong Pa_base[], julong Pn_base[],
// julong Pm_base[], julong inv, int len) {
// julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
// julong *Pa, *Pb, *Pn, *Pm;
// julong Ra, Rb, Rn, Rm;

// int i;


@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -994,7 +994,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ ldrw(val, Address(esp, 0)); // byte value
__ ldrw(crc, Address(esp, wordSize)); // Initial CRC

unsigned long offset;
uint64_t offset;
__ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
__ add(tbl, tbl, offset);


@ -1706,7 +1706,7 @@ void TemplateTable::lcmp()
Label done;
__ pop_l(r1);
__ cmp(r1, r0);
__ mov(r0, (u_int64_t)-1L);
__ mov(r0, (uint64_t)-1L);
__ br(Assembler::LT, done);
// __ mov(r0, 1UL);
// __ csel(r0, r0, zr, Assembler::NE);
@ -1730,7 +1730,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result)
if (unordered_result < 0) {
// we want -1 for unordered or less than, 0 for equal and 1 for
// greater than.
__ mov(r0, (u_int64_t)-1L);
__ mov(r0, (uint64_t)-1L);
// for FP LT tests less than or unordered
__ br(Assembler::LT, done);
// install 0 for EQ otherwise 1
@ -2975,6 +2975,9 @@ void TemplateTable::fast_storefield(TosState state)
// access constant pool cache
__ get_cache_and_index_at_bcp(r2, r1, 1);

// Must prevent reordering of the following cp cache loads with bytecode load
__ membar(MacroAssembler::LoadLoad);

// test for volatile with r3
__ ldrw(r3, Address(r2, in_bytes(base +
ConstantPoolCacheEntry::flags_offset())));
@ -3067,6 +3070,10 @@ void TemplateTable::fast_accessfield(TosState state)

// access constant pool cache
__ get_cache_and_index_at_bcp(r2, r1, 1);

// Must prevent reordering of the following cp cache loads with bytecode load
__ membar(MacroAssembler::LoadLoad);

__ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset())));
__ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +

@ -161,7 +161,7 @@ void VM_Version::get_processor_features() {
SoftwarePrefetchHintDistance &= ~7;
}

unsigned long auxv = getauxval(AT_HWCAP);
uint64_t auxv = getauxval(AT_HWCAP);

char buf[512];


@ -216,7 +216,7 @@ int ConstantTable::calculate_table_base_offset() const {
// flds, fldd: 8-bit offset multiplied by 4: +/- 1024
// ldr, ldrb : 12-bit offset: +/- 4096
if (!Assembler::is_simm10(offset)) {
offset = Assembler::min_simm10();
offset = Assembler::min_simm10;
}
return offset;
}

@ -29,6 +29,7 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"

@ -30,6 +30,7 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"

@ -9203,6 +9203,19 @@ instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
ins_pipe(pipe_class_default);
%}

instruct absL_reg_Ex(iRegLdst dst, iRegLsrc src) %{
match(Set dst (AbsL src));
ins_cost(DEFAULT_COST*3);

expand %{
iRegLdst tmp1;
iRegLdst tmp2;
signmask64L_regL(tmp1, src);
xorL_reg_reg(tmp2, tmp1, src);
subL_reg_reg(dst, tmp2, tmp1);
%}
%}

// Long negation
instruct negL_reg_reg(iRegLdst dst, immL_0 zero, iRegLsrc src2) %{
match(Set dst (SubL zero src2));

@ -30,6 +30,7 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

@ -1,6 +1,6 @@
//
// Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2017, 2019 SAP SE. All rights reserved.
// Copyright (c) 2017, 2020 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -9050,6 +9050,17 @@ instruct absI_reg(iRegI dst, iRegI src, flagsReg cr) %{
ins_pipe(pipe_class_dummy);
%}

instruct absL_reg(iRegL dst, iRegL src, flagsReg cr) %{
match(Set dst (AbsL src));
effect(KILL cr);
ins_cost(DEFAULT_COST_LOW);
// TODO: s390 port size(FIXED_SIZE);
format %{ "LPGR $dst, $src" %}
opcode(LPGR_ZOPC);
ins_encode(z_rreform(dst, src));
ins_pipe(pipe_class_dummy);
%}

instruct negabsI_reg(iRegI dst, iRegI src, immI_0 zero, flagsReg cr) %{
match(Set dst (SubI zero (AbsI src)));
effect(KILL cr);

@ -28,6 +28,7 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

@ -45,11 +45,11 @@ bool IntelJccErratum::is_crossing_or_ending_at_32_byte_boundary(uintptr_t start_
return boundary(start_pc) != boundary(end_pc);
}

bool IntelJccErratum::is_jcc_erratum_branch(const Block* block, const MachNode* node, uint node_index) {
bool IntelJccErratum::is_jcc_erratum_branch(const MachNode* node) {
if (node->is_MachCall() && !node->is_MachCallJava()) {
return true;
}
return node_index == (block->number_of_nodes() - 1);
return node->is_MachBranch();
}

int IntelJccErratum::jcc_erratum_taint_node(MachNode* node, PhaseRegAlloc* regalloc) {
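The erratum test above compares 32-byte lines; the boundary() helper itself is outside this hunk, so the following is an assumed shape, shown only to make the predicate concrete (standalone, not HotSpot code):

#include <cstdint>

// Assumed helper: two addresses share a 32-byte line iff bits [63:5] agree.
static uintptr_t boundary(uintptr_t addr) { return addr & ~(uintptr_t)0x1f; }

// An instruction spanning [start_pc, end_pc] crosses or ends at a 32-byte
// boundary exactly when its endpoints fall on different lines.
static bool crosses_or_ends_at_32_byte_boundary(uintptr_t start_pc, uintptr_t end_pc) {
  return boundary(start_pc) != boundary(end_pc);
}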
@ -70,7 +70,7 @@ int IntelJccErratum::tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegA
continue;
}
MachNode* m = node->as_Mach();
if (is_jcc_erratum_branch(block, m, j)) {
if (is_jcc_erratum_branch(m)) {
// Found a root jcc erratum branch, flag it as problematic
nop_size += jcc_erratum_taint_node(m, regalloc);


@ -43,7 +43,7 @@ private:

public:
static bool is_crossing_or_ending_at_32_byte_boundary(uintptr_t start_pc, uintptr_t end_pc);
static bool is_jcc_erratum_branch(const Block* block, const MachNode* node, uint node_index);
static bool is_jcc_erratum_branch(const MachNode* node);
// Analyze JCC erratum branches. Affected nodes get tagged with Flag_intel_jcc_erratum.
// The function returns a conservative estimate of all required nops on all mach nodes.
static int tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegAlloc* regalloc);

@ -733,11 +733,11 @@ void VM_Version::get_processor_features() {

char buf[512];
int res = jio_snprintf(buf, sizeof(buf),
"(%u cores per cpu, %u threads per core) family %d model %d stepping %d"
"(%u cores per cpu, %u threads per core) family %d model %d stepping %d microcode 0x%x"
"%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s",

cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
cpu_family(), _model, _stepping, os::cpu_microcode_revision(),

(supports_cmov() ? ", cmov" : ""),
(supports_cmpxchg8() ? ", cx8" : ""),

@ -1188,10 +1188,7 @@ void PhaseOutput::pd_perform_mach_node_analysis() {
}

int MachNode::pd_alignment_required() const {
PhaseOutput* output = Compile::current()->output();
Block* block = output->block();
int index = output->index();
if (VM_Version::has_intel_jcc_erratum() && IntelJccErratum::is_jcc_erratum_branch(block, this, index)) {
if (VM_Version::has_intel_jcc_erratum() && IntelJccErratum::is_jcc_erratum_branch(this)) {
// Conservatively add worst case padding. We assume that relocInfo::addr_unit() is 1 on x86.
return IntelJccErratum::largest_jcc_size() + 1;
} else {

@ -30,6 +30,7 @@
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"

@ -801,6 +801,18 @@ bool os::is_allocatable(size_t bytes) {
#endif // AMD64
}

juint os::cpu_microcode_revision() {
juint result = 0;
char data[8];
size_t sz = sizeof(data);
int ret = sysctlbyname("machdep.cpu.microcode_version", data, &sz, NULL, 0);
if (ret == 0) {
if (sz == 4) result = *((juint*)data);
if (sz == 8) result = *((juint*)data + 1); // upper 32-bits
}
return result;
}

////////////////////////////////////////////////////////////////////////////////
// thread stack


@ -27,6 +27,7 @@

static void setup_fpu();
static bool supports_sse();
static juint cpu_microcode_revision();

static jlong rdtsc();


@ -620,6 +620,26 @@ bool os::supports_sse() {
#endif // AMD64
}

juint os::cpu_microcode_revision() {
juint result = 0;
char data[2048] = {0}; // lines should fit in 2K buf
size_t len = sizeof(data);
FILE *fp = fopen("/proc/cpuinfo", "r");
if (fp) {
while (!feof(fp)) {
if (fgets(data, len, fp)) {
if (strstr(data, "microcode") != NULL) {
char* rev = strchr(data, ':');
if (rev != NULL) sscanf(rev + 1, "%x", &result);
break;
}
}
}
fclose(fp);
}
return result;
}

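The Linux implementation above finds the "microcode" line in /proc/cpuinfo and parses the hex value after the colon. A standalone equivalent for reference, not HotSpot code; it returns 0 when the field is absent, mirroring the diff's fallback:

#include <cstdio>
#include <cstring>

static unsigned read_microcode_revision() {
  unsigned result = 0;
  char line[2048];
  if (FILE* fp = std::fopen("/proc/cpuinfo", "r")) {
    while (std::fgets(line, sizeof(line), fp)) {
      if (std::strstr(line, "microcode")) {
        if (char* rev = std::strchr(line, ':'))
          std::sscanf(rev + 1, "%x", &result);   // value is printed in hex
        break;                                   // first CPU's entry is enough
      }
    }
    std::fclose(fp);
  }
  return result;
}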
bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
// unused on amd64?

@ -27,6 +27,7 @@

static void setup_fpu();
static bool supports_sse();
static juint cpu_microcode_revision();

static jlong rdtsc();


@ -649,6 +649,23 @@ extern "C" int SpinPause () {
#endif // AMD64
}

juint os::cpu_microcode_revision() {
juint result = 0;
BYTE data[8] = {0};
HKEY key;
DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
if (status == ERROR_SUCCESS) {
DWORD size = sizeof(data);
status = RegQueryValueEx(key, "Update Revision", NULL, NULL, data, &size);
if (status == ERROR_SUCCESS) {
if (size == 4) result = *((juint*)data);
if (size == 8) result = *((juint*)data + 1); // upper 32-bits
}
RegCloseKey(key);
}
return result;
}

void os::setup_fpu() {
#ifndef AMD64

@ -59,6 +59,7 @@

static void setup_fpu();
static bool supports_sse() { return true; }
static juint cpu_microcode_revision();

static jlong rdtsc();


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -211,6 +211,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._VM_file._name));
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
AD.addInclude(AD._CPP_file, "code/codeCache.hpp");
AD.addInclude(AD._CPP_file, "code/compiledIC.hpp");
AD.addInclude(AD._CPP_file, "code/nativeInst.hpp");
AD.addInclude(AD._CPP_file, "code/vmreg.inline.hpp");

@ -299,28 +299,29 @@ class AbstractAssembler : public ResourceObj {
void emit_double( jdouble x) { code_section()->emit_double(x); }
void emit_address(address x) { code_section()->emit_address(x); }

// min and max values for signed immediate ranges
static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1)) ; }
static int max_simm(int nbits) { return (intptr_t(1) << (nbits - 1)) - 1; }
enum { min_simm10 = -512 };

// Define some:
static int min_simm10() { return min_simm(10); }
static int min_simm13() { return min_simm(13); }
static int min_simm16() { return min_simm(16); }
// Test if x is within signed immediate range for width.
static bool is_simm(int64_t x, uint w) {
precond(1 < w && w < 64);
int64_t limes = INT64_C(1) << (w - 1);
return -limes <= x && x < limes;
}

// Test if x is within signed immediate range for nbits
static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }
static bool is_simm8(int64_t x) { return is_simm(x, 8); }
static bool is_simm9(int64_t x) { return is_simm(x, 9); }
static bool is_simm10(int64_t x) { return is_simm(x, 10); }
static bool is_simm16(int64_t x) { return is_simm(x, 16); }
static bool is_simm32(int64_t x) { return is_simm(x, 32); }

// Define some:
static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
// Test if x is within unsigned immediate range for width.
static bool is_uimm(uint64_t x, uint w) {
precond(0 < w && w < 64);
uint64_t limes = UINT64_C(1) << w;
return x < limes;
}

static bool is_uimm12(uint64_t x) { return is_uimm(x, 12); }

// Accessors
CodeSection* code_section() const { return _code_section; }

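The hunk above replaces per-width helpers with two width-parameterized range tests. A brief usage sketch, copying the signed predicate stand-alone with precond() replaced by assert for illustration:

#include <cstdint>
#include <cassert>

// A value x fits a w-bit signed immediate iff -2^(w-1) <= x < 2^(w-1).
static bool is_simm(int64_t x, unsigned w) {
  assert(1 < w && w < 64);
  int64_t limes = INT64_C(1) << (w - 1);
  return -limes <= x && x < limes;
}

int main() {
  assert(is_simm(-512, 10));    // min_simm10 == -512 still fits
  assert(is_simm(511, 10));     // largest 10-bit signed value
  assert(!is_simm(512, 10));    // one past the top is rejected
}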
@ -27,7 +27,6 @@

#include "ci/ciClassList.hpp"
#include "ci/ciObjectFactory.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/debugInfoRec.hpp"
#include "code/dependencies.hpp"
#include "code/exceptionHandlerTable.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,6 +38,7 @@ class MethodLiveness;
class Arena;
class BCEscapeAnalyzer;
class InlineTree;
class xmlStream;

// Whether profiling found an oop to be always, never or sometimes
// null

@ -4028,43 +4028,6 @@ const InstanceKlass* ClassFileParser::parse_super_class(ConstantPool* const cp,
return super_klass;
}

#ifndef PRODUCT
static void print_field_layout(const Symbol* name,
Array<u2>* fields,
ConstantPool* cp,
int instance_size,
int instance_fields_start,
int instance_fields_end,
int static_fields_end) {

assert(name != NULL, "invariant");

tty->print("%s: field layout\n", name->as_klass_external_name());
tty->print(" @%3d %s\n", instance_fields_start, "--- instance fields start ---");
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
if (!fs.access_flags().is_static()) {
tty->print(" @%3d \"%s\" %s\n",
fs.offset(),
fs.name()->as_klass_external_name(),
fs.signature()->as_klass_external_name());
}
}
tty->print(" @%3d %s\n", instance_fields_end, "--- instance fields end ---");
tty->print(" @%3d %s\n", instance_size * wordSize, "--- instance ends ---");
tty->print(" @%3d %s\n", InstanceMirrorKlass::offset_of_static_fields(), "--- static fields start ---");
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
tty->print(" @%3d \"%s\" %s\n",
fs.offset(),
fs.name()->as_klass_external_name(),
fs.signature()->as_klass_external_name());
}
}
tty->print(" @%3d %s\n", static_fields_end, "--- static fields end ---");
tty->print("\n");
}
#endif

OopMapBlocksBuilder::OopMapBlocksBuilder(unsigned int max_blocks) {
_max_nonstatic_oop_maps = max_blocks;
_nonstatic_oop_map_count = 0;
@ -4181,432 +4144,6 @@ void OopMapBlocksBuilder::print_value_on(outputStream* st) const {
print_on(st);
}

// Layout fields and fill in FieldLayoutInfo. Could use more refactoring!
void ClassFileParser::layout_fields(ConstantPool* cp,
const FieldAllocationCount* fac,
const ClassAnnotationCollector* parsed_annotations,
FieldLayoutInfo* info,
TRAPS) {

assert(cp != NULL, "invariant");

// Field size and offset computation
int nonstatic_field_size = _super_klass == NULL ? 0 :
_super_klass->nonstatic_field_size();

// Count the contended fields by type.
//
// We ignore static fields, because @Contended is not supported for them.
// The layout code below will also ignore the static fields.
int nonstatic_contended_count = 0;
FieldAllocationCount fac_contended;
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
if (fs.is_contended()) {
fac_contended.count[atype]++;
if (!fs.access_flags().is_static()) {
nonstatic_contended_count++;
}
}
}


// Calculate the starting byte offsets
int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
int next_static_double_offset = next_static_oop_offset +
((fac->count[STATIC_OOP]) * heapOopSize);
if (fac->count[STATIC_DOUBLE]) {
next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
}

int next_static_word_offset = next_static_double_offset +
((fac->count[STATIC_DOUBLE]) * BytesPerLong);
int next_static_short_offset = next_static_word_offset +
((fac->count[STATIC_WORD]) * BytesPerInt);
int next_static_byte_offset = next_static_short_offset +
((fac->count[STATIC_SHORT]) * BytesPerShort);

int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() +
nonstatic_field_size * heapOopSize;

int next_nonstatic_field_offset = nonstatic_fields_start;

const bool is_contended_class = parsed_annotations->is_contended();

// Class is contended, pad before all the fields
if (is_contended_class) {
next_nonstatic_field_offset += ContendedPaddingWidth;
}

// Compute the non-contended fields count.
// The packing code below relies on these counts to determine if some field
// can be squeezed into the alignment gap. Contended fields are obviously
// exempt from that.
unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
unsigned int nonstatic_word_count = fac->count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
unsigned int nonstatic_short_count = fac->count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];

// Total non-static fields count, including every contended field
unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] +
fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] +
fac->count[NONSTATIC_OOP];

const bool super_has_nonstatic_fields =
(_super_klass != NULL && _super_klass->has_nonstatic_fields());
const bool has_nonstatic_fields =
super_has_nonstatic_fields || (nonstatic_fields_count != 0);


// Prepare list of oops for oop map generation.
//
// "offset" and "count" lists are describing the set of contiguous oop
// regions. offset[i] is the start of the i-th region, which then has
// count[i] oops following. Before we know how many regions are required,
// we pessimistically allocate the maps to fit all the oops into the
// distinct regions.

int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
int max_oop_map_count = super_oop_map_count + fac->count[NONSTATIC_OOP];

OopMapBlocksBuilder* nonstatic_oop_maps = new OopMapBlocksBuilder(max_oop_map_count);
if (super_oop_map_count > 0) {
nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
_super_klass->nonstatic_oop_map_count());
}

int first_nonstatic_oop_offset = 0; // will be set for first oop field

bool compact_fields = true;
bool allocate_oops_first = false;

int next_nonstatic_oop_offset = 0;
int next_nonstatic_double_offset = 0;

// Rearrange fields for a given allocation style
if (allocate_oops_first) {
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
(nonstatic_oop_count * heapOopSize);
} else {
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops, padded fields
next_nonstatic_double_offset = next_nonstatic_field_offset;
}

int nonstatic_oop_space_count = 0;
int nonstatic_word_space_count = 0;
int nonstatic_short_space_count = 0;
int nonstatic_byte_space_count = 0;
int nonstatic_oop_space_offset = 0;
int nonstatic_word_space_offset = 0;
int nonstatic_short_space_offset = 0;
int nonstatic_byte_space_offset = 0;

// Try to squeeze some of the fields into the gaps due to
// long/double alignment.
if (nonstatic_double_count > 0) {
int offset = next_nonstatic_double_offset;
next_nonstatic_double_offset = align_up(offset, BytesPerLong);
if (compact_fields && offset != next_nonstatic_double_offset) {
// Allocate available fields into the gap before double field.
int length = next_nonstatic_double_offset - offset;
assert(length == BytesPerInt, "");
nonstatic_word_space_offset = offset;
if (nonstatic_word_count > 0) {
nonstatic_word_count -= 1;
nonstatic_word_space_count = 1; // Only one will fit
length -= BytesPerInt;
offset += BytesPerInt;
}
nonstatic_short_space_offset = offset;
while (length >= BytesPerShort && nonstatic_short_count > 0) {
nonstatic_short_count -= 1;
nonstatic_short_space_count += 1;
length -= BytesPerShort;
offset += BytesPerShort;
}
nonstatic_byte_space_offset = offset;
while (length > 0 && nonstatic_byte_count > 0) {
nonstatic_byte_count -= 1;
nonstatic_byte_space_count += 1;
length -= 1;
}
// Allocate oop field in the gap if there are no other fields for that.
nonstatic_oop_space_offset = offset;
if (length >= heapOopSize && nonstatic_oop_count > 0 &&
!allocate_oops_first) { // when oop fields not first
nonstatic_oop_count -= 1;
nonstatic_oop_space_count = 1; // Only one will fit
length -= heapOopSize;
offset += heapOopSize;
}
}
}

int next_nonstatic_word_offset = next_nonstatic_double_offset +
(nonstatic_double_count * BytesPerLong);
int next_nonstatic_short_offset = next_nonstatic_word_offset +
(nonstatic_word_count * BytesPerInt);
int next_nonstatic_byte_offset = next_nonstatic_short_offset +
(nonstatic_short_count * BytesPerShort);
int next_nonstatic_padded_offset = next_nonstatic_byte_offset +
nonstatic_byte_count;

// let oops jump before padding with this allocation style
if (!allocate_oops_first) {
next_nonstatic_oop_offset = next_nonstatic_padded_offset;
if( nonstatic_oop_count > 0 ) {
next_nonstatic_oop_offset = align_up(next_nonstatic_oop_offset, heapOopSize);
}
next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
}

// Iterate over fields again and compute correct offsets.
// The field allocation type was temporarily stored in the offset slot.
// oop fields are located before non-oop fields (static and non-static).
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {

// skip already laid out fields
if (fs.is_offset_set()) continue;

// contended instance fields are handled below
if (fs.is_contended() && !fs.access_flags().is_static()) continue;

int real_offset = 0;
const FieldAllocationType atype = (const FieldAllocationType) fs.allocation_type();

// pack the rest of the fields
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
next_static_oop_offset += heapOopSize;
break;
case STATIC_BYTE:
real_offset = next_static_byte_offset;
next_static_byte_offset += 1;
break;
case STATIC_SHORT:
real_offset = next_static_short_offset;
next_static_short_offset += BytesPerShort;
break;
case STATIC_WORD:
real_offset = next_static_word_offset;
next_static_word_offset += BytesPerInt;
break;
case STATIC_DOUBLE:
real_offset = next_static_double_offset;
next_static_double_offset += BytesPerLong;
break;
case NONSTATIC_OOP:
if( nonstatic_oop_space_count > 0 ) {
real_offset = nonstatic_oop_space_offset;
nonstatic_oop_space_offset += heapOopSize;
nonstatic_oop_space_count -= 1;
} else {
real_offset = next_nonstatic_oop_offset;
next_nonstatic_oop_offset += heapOopSize;
}
nonstatic_oop_maps->add(real_offset, 1);
break;
case NONSTATIC_BYTE:
if( nonstatic_byte_space_count > 0 ) {
real_offset = nonstatic_byte_space_offset;
nonstatic_byte_space_offset += 1;
nonstatic_byte_space_count -= 1;
} else {
real_offset = next_nonstatic_byte_offset;
next_nonstatic_byte_offset += 1;
}
break;
case NONSTATIC_SHORT:
if( nonstatic_short_space_count > 0 ) {
real_offset = nonstatic_short_space_offset;
nonstatic_short_space_offset += BytesPerShort;
nonstatic_short_space_count -= 1;
} else {
real_offset = next_nonstatic_short_offset;
next_nonstatic_short_offset += BytesPerShort;
}
break;
case NONSTATIC_WORD:
if( nonstatic_word_space_count > 0 ) {
real_offset = nonstatic_word_space_offset;
nonstatic_word_space_offset += BytesPerInt;
nonstatic_word_space_count -= 1;
} else {
real_offset = next_nonstatic_word_offset;
next_nonstatic_word_offset += BytesPerInt;
}
break;
case NONSTATIC_DOUBLE:
real_offset = next_nonstatic_double_offset;
next_nonstatic_double_offset += BytesPerLong;
break;
default:
ShouldNotReachHere();
}
fs.set_offset(real_offset);
}


// Handle the contended cases.
//
// Each contended field should not intersect the cache line with another contended field.
// In the absence of alignment information, we end up with pessimistically separating
// the fields with full-width padding.
//
// Additionally, this should not break alignment for the fields, so we round the alignment up
// for each field.
if (nonstatic_contended_count > 0) {

// if there is at least one contended field, we need to have pre-padding for them
next_nonstatic_padded_offset += ContendedPaddingWidth;

// collect all contended groups
ResourceBitMap bm(cp->size());
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;

if (fs.is_contended()) {
bm.set_bit(fs.contended_group());
}
}

int current_group = -1;
while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {

for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {

// skip already laid out fields
if (fs.is_offset_set()) continue;

// skip non-contended fields and fields from different group
if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;

// handle statics below
if (fs.access_flags().is_static()) continue;

int real_offset = 0;
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();

switch (atype) {
case NONSTATIC_BYTE:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, 1);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += 1;
break;

case NONSTATIC_SHORT:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerShort);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerShort;
break;

case NONSTATIC_WORD:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerInt);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerInt;
break;

case NONSTATIC_DOUBLE:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerLong);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerLong;
break;

case NONSTATIC_OOP:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += heapOopSize;
nonstatic_oop_maps->add(real_offset, 1);
break;

default:
ShouldNotReachHere();
}

if (fs.contended_group() == 0) {
// Contended group defines the equivalence class over the fields:
// the fields within the same contended group are not inter-padded.
// The only exception is default group, which does not incur the
// equivalence, and so requires intra-padding.
next_nonstatic_padded_offset += ContendedPaddingWidth;
}

fs.set_offset(real_offset);
} // for

// Start laying out the next group.
// Note that this will effectively pad the last group in the back;
// this is expected to alleviate memory contention effects for
// subclass fields and/or adjacent object.
// If this was the default group, the padding is already in place.
if (current_group != 0) {
next_nonstatic_padded_offset += ContendedPaddingWidth;
}
}

// handle static fields
}

// Entire class is contended, pad in the back.
// This helps to alleviate memory contention effects for subclass fields
// and/or adjacent object.
if (is_contended_class) {
next_nonstatic_padded_offset += ContendedPaddingWidth;
}

int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;

int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, heapOopSize);
int instance_end = align_up(notaligned_nonstatic_fields_end, wordSize);
int static_fields_end = align_up(next_static_byte_offset, wordSize);

int static_field_size = (static_fields_end -
InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
nonstatic_field_size = nonstatic_field_size +
(nonstatic_fields_end - nonstatic_fields_start) / heapOopSize;

int instance_size = align_object_size(instance_end / wordSize);

assert(instance_size == align_object_size(align_up(
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
wordSize) / wordSize), "consistent layout helper value");

// Invariant: nonstatic_field end/start should only change if there are
// nonstatic fields in the class, or if the class is contended. We compare
// against the non-aligned value, so that end alignment will not fail the
// assert without actually having the fields.
assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) ||
is_contended_class ||
(nonstatic_fields_count > 0), "double-check nonstatic start/end");

// Number of non-static oop map blocks allocated at end of klass.
nonstatic_oop_maps->compact();

#ifndef PRODUCT
if (PrintFieldLayout) {
print_field_layout(_class_name,
_fields,
cp,
instance_size,
nonstatic_fields_start,
nonstatic_fields_end,
static_fields_end);
}

#endif
// Pass back information needed for InstanceKlass creation
info->oop_map_blocks = nonstatic_oop_maps;
info->_instance_size = instance_size;
info->_static_field_size = static_field_size;
info->_nonstatic_field_size = nonstatic_field_size;
info->_has_nonstatic_fields = has_nonstatic_fields;
}

void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
assert(ik != NULL, "invariant");

@ -6668,13 +6205,9 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
assert(_parsed_annotations != NULL, "invariant");

_field_info = new FieldLayoutInfo();
if (UseNewFieldLayout) {
FieldLayoutBuilder lb(class_name(), super_klass(), _cp, _fields,
_parsed_annotations->is_contended(), _field_info);
lb.build_layout();
} else {
layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK);
}
FieldLayoutBuilder lb(class_name(), super_klass(), _cp, _fields,
_parsed_annotations->is_contended(), _field_info);
lb.build_layout();

// Compute reference type
|
||||
_rt = (NULL ==_super_klass) ? REF_NONE : _super_klass->reference_type();
|
||||
|
||||
@ -533,13 +533,6 @@ class ClassFileParser {
|
||||
int annotation_default_length,
|
||||
TRAPS);
|
||||
|
||||
// lays out fields in class and returns the total oopmap count
|
||||
void layout_fields(ConstantPool* cp,
|
||||
const FieldAllocationCount* fac,
|
||||
const ClassAnnotationCollector* parsed_annotations,
|
||||
FieldLayoutInfo* info,
|
||||
TRAPS);
|
||||
|
||||
void update_class_name(Symbol* new_name);
|
||||
|
||||
public:
|
||||
|
||||
@ -55,12 +55,12 @@
|
||||
#include "classfile/packageEntry.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "gc/shared/oopStorageSet.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/oopHandle.inline.hpp"
|
||||
@ -488,7 +488,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
|
||||
void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
|
||||
if (loader_or_mirror() != NULL) {
|
||||
assert(_holder.is_null(), "never replace holders");
|
||||
_holder = WeakHandle(OopStorageSet::vm_weak(), loader_or_mirror);
|
||||
_holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
|
||||
}
|
||||
}
|
||||
|
||||
@ -655,7 +655,7 @@ ClassLoaderData::~ClassLoaderData() {
|
||||
ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
|
||||
|
||||
// Release the WeakHandle
|
||||
_holder.release(OopStorageSet::vm_weak());
|
||||
_holder.release(Universe::vm_weak());
|
||||
|
||||
// Release C heap allocated hashtable for all the packages.
|
||||
if (_packages != NULL) {
|
||||
|
||||
@ -27,12 +27,12 @@
#include "classfile/dictionary.hpp"
#include "classfile/protectionDomainCache.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/mutexLocker.hpp"
@ -407,14 +407,14 @@ oop SymbolPropertyEntry::method_type() const {
}

void SymbolPropertyEntry::set_method_type(oop p) {
_method_type = OopHandle(OopStorageSet::vm_global(), p);
_method_type = OopHandle(Universe::vm_global(), p);
}

void SymbolPropertyEntry::free_entry() {
// decrement Symbol refcount here because hashtable doesn't.
literal()->decrement_refcount();
// Free OopHandle
_method_type.release(OopStorageSet::vm_global());
_method_type.release(Universe::vm_global());
}

SymbolPropertyTable::SymbolPropertyTable(int table_size)

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,11 +27,11 @@
#include "classfile/dictionary.hpp"
#include "classfile/protectionDomainCache.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "utilities/hashtable.inline.hpp"
@ -94,7 +94,7 @@ void ProtectionDomainCacheTable::unlink() {
LogStream ls(lt);
ls.print_cr("protection domain unlinked at %d", i);
}
entry->literal().release(OopStorageSet::vm_weak());
entry->literal().release(Universe::vm_weak());
*p = entry->next();
free_entry(entry);
}
@ -181,7 +181,7 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, uns
protection_domain->print_value_on(&ls);
ls.cr();
}
WeakHandle w(OopStorageSet::vm_weak(), protection_domain);
WeakHandle w(Universe::vm_weak(), protection_domain);
ProtectionDomainCacheEntry* p = new_entry(hash, w);
Hashtable<WeakHandle, mtClass>::add_entry(index, p);
return p;

@ -85,8 +85,7 @@ static StringTableHash* _local_table = NULL;

volatile bool StringTable::_has_work = false;
volatile bool StringTable::_needs_rehashing = false;

volatile size_t StringTable::_uncleaned_items_count = 0;
OopStorage* StringTable::_oop_storage;

static size_t _current_size = 0;
static volatile size_t _items_count = 0;
@ -129,7 +128,7 @@ class StringTableConfig : public StackObj {
return AllocateHeap(size, mtSymbol);
}
static void free_node(void* memory, Value const& value) {
value.release(OopStorageSet::string_table_weak());
value.release(StringTable::_oop_storage);
FreeHeap(memory);
StringTable::item_removed();
}
@ -211,30 +210,24 @@ void StringTable::create_table() {
log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, start_size_log_2);
_local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN);
_oop_storage = OopStorageSet::create_weak("StringTable Weak");
_oop_storage->register_num_dead_callback(&gc_notification);
}
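The create_table() hunk above has the StringTable allocate its own OopStorage and register gc_notification as a dead-count callback, replacing the old push model where the GC called reset/inc/finish_dead_counter directly. A minimal standalone analogue of that callback wiring (illustrative names, no concurrency):

    #include <cstddef>
    #include <cstdio>

    typedef void (*NumDeadCallback)(size_t num_dead);

    struct WeakStorage {
      NumDeadCallback _callback = nullptr;
      void register_num_dead_callback(NumDeadCallback cb) { _callback = cb; }
      void notify_gc_done(size_t num_dead) {         // invoked by the collector
        if (_callback != nullptr) _callback(num_dead);
      }
    };

    static void gc_notification(size_t num_dead) {
      std::printf("Uncleaned items: %zu\n", num_dead);
      // here the table decides whether to trigger concurrent cleanup/resize
    }

    int main() {
      WeakStorage storage;                            // create_weak("StringTable Weak")
      storage.register_num_dead_callback(&gc_notification);
      storage.notify_gc_done(17);                     // GC reports its dead count
      return 0;
    }

The design win is inversion of control: the GC no longer needs to know the table exists; it only reports dead counts to whatever storage registered interest.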
size_t StringTable::item_added() {
return Atomic::add(&_items_count, (size_t)1);
}

size_t StringTable::add_items_to_clean(size_t ndead) {
size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead);
log_trace(stringtable)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
_uncleaned_items_count, ndead, total);
return total;
}

void StringTable::item_removed() {
Atomic::add(&_items_count, (size_t)-1);
}

double StringTable::get_load_factor() {
return (double)_items_count/_current_size;
return double(_items_count)/double(_current_size);
}

double StringTable::get_dead_factor() {
return (double)_uncleaned_items_count/_current_size;
double StringTable::get_dead_factor(size_t num_dead) {
return double(num_dead)/double(_current_size);
}
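The factor functions above are plain ratios over the current table size; get_dead_factor() now takes the GC-reported count as a parameter instead of reading the shared _uncleaned_items_count. For example, with 12000 live items, 3000 reported dead, and 16384 buckets, load is about 0.73 and dead about 0.18. A standalone check of that arithmetic (values assumed):

    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t items_count = 12000, num_dead = 3000, current_size = 16384;
      double load_factor = double(items_count) / double(current_size);  // ~0.73
      double dead_factor = double(num_dead) / double(current_size);     // ~0.18
      std::printf("load=%.2f dead=%.2f\n", load_factor, dead_factor);
      return 0;
    }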
size_t StringTable::table_size() {
@ -243,7 +236,7 @@ size_t StringTable::table_size() {

void StringTable::trigger_concurrent_work() {
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
_has_work = true;
Atomic::store(&_has_work, true);
Service_lock->notify_all();
}

@ -368,7 +361,7 @@ oop StringTable::do_intern(Handle string_or_null_h, const jchar* name,
bool rehash_warning;
do {
// Callers have already looked up the String using the jchar* name, so just go to add.
WeakHandle wh(OopStorageSet::string_table_weak(), string_h);
WeakHandle wh(_oop_storage, string_h);
// The hash table takes ownership of the WeakHandle, even if it's not inserted.
if (_local_table->insert(THREAD, lookup, wh, &rehash_warning)) {
update_needs_rehash(rehash_warning);
@ -449,13 +442,15 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
log_debug(stringtable)("Cleaned %ld of %ld", stdc._count, stdc._item);
}

void StringTable::check_concurrent_work() {
if (_has_work) {
void StringTable::gc_notification(size_t num_dead) {
log_trace(stringtable)("Uncleaned items:" SIZE_FORMAT, num_dead);

if (has_work()) {
return;
}

double load_factor = StringTable::get_load_factor();
double dead_factor = StringTable::get_dead_factor();
double dead_factor = StringTable::get_dead_factor(num_dead);
// We should clean/resize if we have more dead than alive,
// more items than preferred load factor or
// more dead items than water mark.
@ -468,8 +463,11 @@ void StringTable::check_concurrent_work() {
}
}

bool StringTable::has_work() {
return Atomic::load_acquire(&_has_work);
}

void StringTable::do_concurrent_work(JavaThread* jt) {
_has_work = false;
double load_factor = get_load_factor();
log_debug(stringtable, perf)("Concurrent work, live factor: %g", load_factor);
// We prefer growing, since that also removes dead items
@ -478,6 +476,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) {
} else {
clean_dead_entries(jt);
}
Atomic::release_store(&_has_work, false);
}
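With gc_notification() now running from a GC context, _has_work becomes a cross-thread message: trigger_concurrent_work() stores it, has_work() reads it with acquire semantics, and do_concurrent_work() clears it with a release store once cleanup is done. A standalone sketch of that acquire/release pairing using std::atomic rather than HotSpot's Atomic class (illustrative, single producer and consumer):

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<bool> has_work{false};
    int work_item = 0;

    int main() {
      std::thread producer([] {
        work_item = 42;                                   // data published before the flag
        has_work.store(true, std::memory_order_release);  // like Atomic::release_store
      });
      std::thread consumer([] {
        while (!has_work.load(std::memory_order_acquire)) {}  // like Atomic::load_acquire
        assert(work_item == 42);  // visible: acquire synchronizes with the release store
      });
      producer.join();
      consumer.join();
      return 0;
    }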
// Rehash

@ -46,23 +46,26 @@ class StringTable : public CHeapObj<mtSymbol>{
friend class StringTableCreateEntry;

static volatile bool _has_work;
static volatile size_t _uncleaned_items_count;

// Set if one bucket is out of balance due to hash algorithm deficiency
static volatile bool _needs_rehashing;

static OopStorage* _oop_storage;

static void grow(JavaThread* jt);
static void clean_dead_entries(JavaThread* jt);

static double get_load_factor();
static double get_dead_factor();
static double get_dead_factor(size_t num_dead);

static void check_concurrent_work();
// GC support

// Callback for GC to notify of changes that might require cleaning or resize.
static void gc_notification(size_t num_dead);
static void trigger_concurrent_work();

static size_t item_added();
static void item_removed();
static size_t add_items_to_clean(size_t ndead);

static oop intern(Handle string_or_null_h, const jchar* name, int len, TRAPS);
static oop do_intern(Handle string_or_null, const jchar* name, int len, uintx hash, TRAPS);
@ -79,20 +82,7 @@ class StringTable : public CHeapObj<mtSymbol>{
static void create_table();

static void do_concurrent_work(JavaThread* jt);
static bool has_work() { return _has_work; }

// GC support

// Must be called before a parallel walk where strings might die.
static void reset_dead_counter() { _uncleaned_items_count = 0; }

// After the parallel walk this method must be called to trigger
// cleaning. Note it might trigger a resize instead.
static void finish_dead_counter() { check_concurrent_work(); }

// If GC uses ParState directly it should add the number of cleared
// strings to this method.
static void inc_dead_counter(size_t ndead) { add_items_to_clean(ndead); }
static bool has_work();

// Probing
static oop lookup(Symbol* symbol);

@ -46,7 +46,6 @@
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
@ -176,7 +175,7 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
vmSymbols::void_classloader_signature(),
CHECK);

_java_system_loader = OopHandle(OopStorageSet::vm_global(), (oop)result.get_jobject());
_java_system_loader = OopHandle(Universe::vm_global(), (oop)result.get_jobject());

JavaCalls::call_static(&result,
class_loader_klass,
@ -184,7 +183,7 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
vmSymbols::void_classloader_signature(),
CHECK);

_java_platform_loader = OopHandle(OopStorageSet::vm_global(), (oop)result.get_jobject());
_java_platform_loader = OopHandle(Universe::vm_global(), (oop)result.get_jobject());
}

ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, bool create_mirror_cld) {
@ -2030,7 +2029,7 @@ void SystemDictionary::initialize(TRAPS) {

// Allocate private object used as system class loader lock
oop lock_obj = oopFactory::new_intArray(0, CHECK);
_system_loader_lock_obj = OopHandle(OopStorageSet::vm_global(), lock_obj);
_system_loader_lock_obj = OopHandle(Universe::vm_global(), lock_obj);

// Initialize basic classes
resolve_well_known_classes(CHECK);
@ -36,7 +36,6 @@
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/verificationType.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
@ -717,7 +716,7 @@ Handle SystemDictionaryShared::get_shared_jar_url(int shared_path_index, TRAPS)
Handle SystemDictionaryShared::get_package_name(Symbol* class_name, TRAPS) {
ResourceMark rm(THREAD);
Handle pkgname_string;
Symbol* pkg = ClassLoader::package_from_class_name(class_name);
TempNewSymbol pkg = ClassLoader::package_from_class_name(class_name);
if (pkg != NULL) { // Package prefix found
const char* pkgname = pkg->as_klass_external_name();
pkgname_string = java_lang_String::create_from_str(pkgname,
@ -1023,7 +1022,7 @@ void SystemDictionaryShared::allocate_shared_protection_domain_array(int size, T
if (_shared_protection_domains.resolve() == NULL) {
oop spd = oopFactory::new_objArray(
SystemDictionary::ProtectionDomain_klass(), size, CHECK);
_shared_protection_domains = OopHandle(OopStorageSet::vm_global(), spd);
_shared_protection_domains = OopHandle(Universe::vm_global(), spd);
}
}

@ -1031,7 +1030,7 @@ void SystemDictionaryShared::allocate_shared_jar_url_array(int size, TRAPS) {
if (_shared_jar_urls.resolve() == NULL) {
oop sju = oopFactory::new_objArray(
SystemDictionary::URL_klass(), size, CHECK);
_shared_jar_urls = OopHandle(OopStorageSet::vm_global(), sju);
_shared_jar_urls = OopHandle(Universe::vm_global(), sju);
}
}

@ -1039,7 +1038,7 @@ void SystemDictionaryShared::allocate_shared_jar_manifest_array(int size, TRAPS)
if (_shared_jar_manifests.resolve() == NULL) {
oop sjm = oopFactory::new_objArray(
SystemDictionary::Jar_Manifest_klass(), size, CHECK);
_shared_jar_manifests = OopHandle(OopStorageSet::vm_global(), sjm);
_shared_jar_manifests = OopHandle(Universe::vm_global(), sjm);
}
}


@ -127,7 +127,7 @@
template(jdk_internal_vm_PostVMInitHook, "jdk/internal/vm/PostVMInitHook") \
template(sun_net_www_ParseUtil, "sun/net/www/ParseUtil") \
template(java_util_Iterator, "java/util/Iterator") \
template(java_lang_Record, "java/lang/Record") \
template(java_lang_Record, "java/lang/Record") \
\
template(jdk_internal_loader_NativeLibraries, "jdk/internal/loader/NativeLibraries") \
template(jdk_internal_loader_ClassLoaders_AppClassLoader, "jdk/internal/loader/ClassLoaders$AppClassLoader") \

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,6 @@
#include "ci/ciKlass.hpp"
#include "ci/ciMethod.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/compressedStream.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,9 @@
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

class CompiledIC;
class CompiledICHolder;

//
// For CompiledIC's:
//

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"

@ -95,13 +95,13 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
@ -1143,77 +1143,15 @@ void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
void G1CollectedHeap::resize_heap_if_necessary() {
assert_at_safepoint_on_vm_thread();

// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
const size_t capacity_after_gc = capacity();
const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
bool should_expand;
size_t resize_amount = _heap_sizing_policy->full_collection_resize_amount(should_expand);

// This is enforced in arguments.cpp.
assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
"otherwise the code below doesn't make sense");

// We don't have floating point command-line arguments
const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;

// We have to be careful here as these two calculations can overflow
// 32-bit size_t's.
double used_after_gc_d = (double) used_after_gc;
double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;

// Let's make sure that they are both under the max heap size, which
// by default will make them fit into a size_t.
double desired_capacity_upper_bound = (double) MaxHeapSize;
minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
desired_capacity_upper_bound);
maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
desired_capacity_upper_bound);

// We can now safely turn them into size_t's.
size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;

// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert(minimum_desired_capacity <= maximum_desired_capacity,
"minimum_desired_capacity = " SIZE_FORMAT ", "
"maximum_desired_capacity = " SIZE_FORMAT,
minimum_desired_capacity, maximum_desired_capacity);

// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);

if (capacity_after_gc < minimum_desired_capacity) {
// Don't expand unless it's significant
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;

log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
"Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
"min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);

expand(expand_bytes, _workers);

// No expansion, now see if we want to shrink
} else if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;

log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
"Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
"maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);

shrink(shrink_bytes);
if (resize_amount == 0) {
return;
} else if (should_expand) {
expand(resize_amount, _workers);
} else {
shrink(resize_amount);
}
}
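The hunk above reduces resize_heap_if_necessary() to a single policy query plus a three-way branch on a direction out-parameter; the ratio math moves into G1HeapSizingPolicy (see its hunk further below). A standalone sketch of that calling convention, with a fixed stub standing in for the policy:

    #include <cstddef>
    #include <cstdio>

    // Stub policy: returns how many bytes to resize by and sets the direction.
    static size_t full_collection_resize_amount(bool& expand) {
      expand = true;            // pretend the heap ended up below its minimum target
      return 64 * 1024 * 1024;  // 64 MiB, an assumed value
    }

    int main() {
      bool should_expand;
      size_t resize_amount = full_collection_resize_amount(should_expand);
      if (resize_amount == 0) {
        std::printf("no resize\n");
      } else if (should_expand) {
        std::printf("expand by %zu bytes\n", resize_amount);
      } else {
        std::printf("shrink by %zu bytes\n", resize_amount);
      }
      return 0;
    }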
@ -1868,7 +1806,7 @@ void G1CollectedHeap::ref_processing_init() {
// the regions in the collection set may be dotted around.
//
// * For the concurrent marking ref processor:
// * Reference discovery is enabled at initial marking.
// * Reference discovery is enabled at concurrent start.
// * Reference discovery is disabled and the discovered
// references processed etc during remarking.
// * Reference discovery is MT (see below).
@ -2002,7 +1940,7 @@ void G1CollectedHeap::allocate_dummy_regions() {

// _filler_array_max_size is set to humongous object threshold
// but temporarily change it to use CollectedHeap::fill_with_object().
SizeTFlagSetting fs(_filler_array_max_size, word_size);
AutoModifyRestore<size_t> temporarily(_filler_array_max_size, word_size);

for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
// Let's use the existing mechanism for the allocation
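SizeTFlagSetting gives way to the generic AutoModifyRestore from the newly included utilities/autoRestore.hpp: an RAII guard that overwrites a variable for the enclosing scope and restores the saved value on destruction. A standalone sketch of the same idea (simplified, not the HotSpot header):

    #include <cassert>
    #include <cstddef>

    template <typename T>
    class AutoModifyRestore {
      T& _var;
      T _saved;
     public:
      AutoModifyRestore(T& var, T temporary) : _var(var), _saved(var) { _var = temporary; }
      ~AutoModifyRestore() { _var = _saved; }  // restore even on early return/exception
    };

    size_t filler_array_max_size = 1024;

    int main() {
      {
        AutoModifyRestore<size_t> temporarily(filler_array_max_size, 4096);
        assert(filler_array_max_size == 4096);  // modified inside the scope
      }
      assert(filler_array_max_size == 1024);    // restored on scope exit
      return 0;
    }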
@ -2109,7 +2047,7 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
"Non-concurrent cause %s", GCCause::to_string(cause));

for (uint i = 1; true; ++i) {
// Try to schedule an initial-mark evacuation pause that will
// Try to schedule concurrent start evacuation pause that will
// start a concurrent cycle.
LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
VM_G1TryInitiateConcMark op(gc_counter,
@ -2178,7 +2116,7 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
//
// Note that (1) does not imply (4). If we're still in the mixed
// phase of an earlier concurrent collection, the request to make the
// collection an initial-mark won't be honored. If we don't check for
// collection a concurrent start won't be honored. If we don't check for
// both conditions we'll spin doing back-to-back collections.
if (op.gc_succeeded() ||
op.cycle_already_in_progress() ||
@ -2676,7 +2614,7 @@ void G1CollectedHeap::gc_prologue(bool full) {

// Update common counters.
increment_total_collections(full /* full gc */);
if (full || collector_state()->in_initial_mark_gc()) {
if (full || collector_state()->in_concurrent_start_gc()) {
increment_old_marking_cycles_started();
}

@ -2907,7 +2845,7 @@ void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info
}

G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
if (collector_state()->in_initial_mark_gc()) {
if (collector_state()->in_concurrent_start_gc()) {
return G1HeapVerifier::G1VerifyConcurrentStart;
} else if (collector_state()->in_young_only_phase()) {
return G1HeapVerifier::G1VerifyYoungNormal;
@ -2939,7 +2877,7 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
}

void G1CollectedHeap::expand_heap_after_young_collection(){
size_t expand_bytes = _heap_sizing_policy->expansion_amount();
size_t expand_bytes = _heap_sizing_policy->young_collection_expansion_amount();
if (expand_bytes > 0) {
// No need for an ergo logging here,
// expansion_amount() does this when it returns a value > 0.
@ -2952,7 +2890,7 @@ void G1CollectedHeap::expand_heap_after_young_collection(){
}

const char* G1CollectedHeap::young_gc_name() const {
if (collector_state()->in_initial_mark_gc()) {
if (collector_state()->in_concurrent_start_gc()) {
return "Pause Young (Concurrent Start)";
} else if (collector_state()->in_young_only_phase()) {
if (collector_state()->in_young_gc_before_mixed()) {
@ -3005,24 +2943,24 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
_verifier->verify_region_sets_optional();
_verifier->verify_dirty_young_regions();

// We should not be doing initial mark unless the conc mark thread is running
// We should not be doing concurrent start unless the concurrent mark thread is running
if (!_cm_thread->should_terminate()) {
// This call will decide whether this pause is an initial-mark
// pause. If it is, in_initial_mark_gc() will return true
// This call will decide whether this pause is a concurrent start
// pause. If it is, in_concurrent_start_gc() will return true
// for the duration of this pause.
policy()->decide_on_conc_mark_initiation();
}

// We do not allow initial-mark to be piggy-backed on a mixed GC.
assert(!collector_state()->in_initial_mark_gc() ||
// We do not allow concurrent start to be piggy-backed on a mixed GC.
assert(!collector_state()->in_concurrent_start_gc() ||
collector_state()->in_young_only_phase(), "sanity");
// We also do not allow mixed GCs during marking.
assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");

// Record whether this pause is an initial mark. When the current
// Record whether this pause is a concurrent start. When the current
// thread has completed its logging output and it's safe to signal
// the CM thread, the flag's value in the policy has been reset.
bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
bool should_start_conc_mark = collector_state()->in_concurrent_start_gc();
if (should_start_conc_mark) {
_cm->gc_tracer_cm()->set_gc_cause(gc_cause());
}
@ -3106,7 +3044,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
// appropriate initialization is done on the CM object.
concurrent_mark()->post_initial_mark();
concurrent_mark()->post_concurrent_start();
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
@ -3587,7 +3525,7 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
}

void G1CollectedHeap::make_pending_list_reachable() {
if (collector_state()->in_initial_mark_gc()) {
if (collector_state()->in_concurrent_start_gc()) {
oop pll_head = Universe::reference_pending_list();
if (pll_head != NULL) {
// Any valid worker id is fine here as we are in the VM thread and single-threaded.
@ -3776,9 +3714,9 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i
DerivedPointerTable::clear();
#endif

// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (collector_state()->in_initial_mark_gc()) {
concurrent_mark()->pre_initial_mark();
// Concurrent start needs claim bits to keep track of the marked-through CLDs.
if (collector_state()->in_concurrent_start_gc()) {
concurrent_mark()->pre_concurrent_start();

double start_clear_claimed_marks = os::elapsedTime();

@ -4846,7 +4784,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
_survivor.add_used_bytes(allocated_bytes);
}

bool const during_im = collector_state()->in_initial_mark_gc();
bool const during_im = collector_state()->in_concurrent_start_gc();
if (during_im && allocated_bytes > 0) {
_cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
}
@ -533,8 +533,8 @@ private:
// Process any reference objects discovered.
void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);

// If during an initial mark pause we may install a pending list head which is not
// otherwise reachable ensure that it is marked in the bitmap for concurrent marking
// If during a concurrent start pause we may install a pending list head which is not
// otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
// to discover.
void make_pending_list_reachable();

@ -732,7 +732,7 @@ private:

// Shrink the garbage-first heap by at most the given size (in bytes!).
// (Rounds down to a HeapRegion boundary.)
void shrink(size_t expand_bytes);
void shrink(size_t shrink_bytes);
void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
@ -856,7 +856,7 @@ public:
// for the current GC (based upon the type of GC and which
// command line flags are set);
inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
bool during_initial_mark,
bool during_concurrent_start,
bool mark_or_rebuild_in_progress);

inline void set_evacuation_failure_alot_for_current_gc();
@ -916,7 +916,7 @@ public:
// making the STW ref processor inactive by disabling discovery.
// * Verify that the CM ref processor is still inactive
// and no references have been placed on it's discovered
// lists (also checked as a precondition during initial marking).
// lists (also checked as a precondition during concurrent start).

// The (stw) reference processor...
ReferenceProcessor* _ref_processor_stw;

@ -193,14 +193,14 @@ void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
bool during_initial_mark,
bool during_concurrent_start,
bool mark_or_rebuild_in_progress) {
bool res = false;
if (mark_or_rebuild_in_progress) {
res |= G1EvacuationFailureALotDuringConcMark;
}
if (during_initial_mark) {
res |= G1EvacuationFailureALotDuringInitialMark;
if (during_concurrent_start) {
res |= G1EvacuationFailureALotDuringConcurrentStart;
}
if (for_young_gc) {
res |= G1EvacuationFailureALotDuringYoungGC;
@ -227,12 +227,12 @@ G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {

// Now check if G1EvacuationFailureALot is enabled for the current GC type.
const bool in_young_only_phase = collector_state()->in_young_only_phase();
const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc();
const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

_evacuation_failure_alot_for_current_gc &=
evacuation_failure_alot_for_gc_type(in_young_only_phase,
in_initial_mark_gc,
in_concurrent_start_gc,
mark_or_rebuild_in_progress);
}
}

@ -40,28 +40,28 @@ class G1CollectorState {

// If _initiate_conc_mark_if_possible is set at the beginning of a
// pause, it is a suggestion that the pause should start a marking
// cycle by doing the initial-mark work. However, it is possible
// cycle by doing the concurrent start work. However, it is possible
// that the concurrent marking thread is still finishing up the
// previous marking cycle (e.g., clearing the next marking
// bitmap). If that is the case we cannot start a new cycle and
// we'll have to wait for the concurrent marking thread to finish
// what it is doing. In this case we will postpone the marking cycle
// initiation decision for the next pause. When we eventually decide
// to start a cycle, we will set _in_initial_mark_gc which
// will stay true until the end of the initial-mark pause doing the
// initial-mark work.
volatile bool _in_initial_mark_gc;
// to start a cycle, we will set _in_concurrent_start_gc which
// will stay true until the end of the concurrent start pause doing the
// concurrent start work.
volatile bool _in_concurrent_start_gc;

// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, set this parameter. This parameter will
// stay set until the beginning of a subsequent pause (not necessarily
// the next one) when we decide that we will indeed start a marking cycle and
// do the initial-mark work.
// do the concurrent start phase work.
volatile bool _initiate_conc_mark_if_possible;

// Marking or rebuilding remembered set work is in progress. Set from the end
// of the initial mark pause to the end of the Cleanup pause.
// of the concurrent start pause to the end of the Cleanup pause.
bool _mark_or_rebuild_in_progress;

// The next bitmap is currently being cleared or about to be cleared. TAMS and bitmap
@ -76,7 +76,7 @@ public:
_in_young_only_phase(true),
_in_young_gc_before_mixed(false),

_in_initial_mark_gc(false),
_in_concurrent_start_gc(false),
_initiate_conc_mark_if_possible(false),

_mark_or_rebuild_in_progress(false),
@ -88,7 +88,7 @@ public:

// Pause setters
void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; }
void set_in_initial_mark_gc(bool v) { _in_initial_mark_gc = v; }
void set_in_concurrent_start_gc(bool v) { _in_concurrent_start_gc = v; }
void set_in_full_gc(bool v) { _in_full_gc = v; }

void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
@ -103,7 +103,7 @@ public:
// Specific pauses
bool in_young_gc_before_mixed() const { return _in_young_gc_before_mixed; }
bool in_full_gc() const { return _in_full_gc; }
bool in_initial_mark_gc() const { return _in_initial_mark_gc; }
bool in_concurrent_start_gc() const { return _in_concurrent_start_gc; }

bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }

@ -111,8 +111,8 @@ public:
bool clearing_next_bitmap() const { return _clearing_next_bitmap; }

G1YCType yc_type() const {
if (in_initial_mark_gc()) {
return InitialMark;
if (in_concurrent_start_gc()) {
return ConcurrentStart;
} else if (mark_or_rebuild_in_progress()) {
return DuringMarkOrRebuild;
} else if (in_young_only_phase()) {
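The yc_type() hunk above only renames the first branch; the precedence order (concurrent start, then mark/rebuild in progress, then young-only) is unchanged. A compact standalone sketch of that decision chain, with simplified enum and field names:

    #include <cstdio>

    enum G1YCType { Normal, ConcurrentStart, DuringMarkOrRebuild, Mixed };

    struct CollectorState {
      bool in_concurrent_start_gc = false;
      bool mark_or_rebuild_in_progress = false;
      bool in_young_only_phase = true;

      G1YCType yc_type() const {
        if (in_concurrent_start_gc) return ConcurrentStart;       // highest precedence
        if (mark_or_rebuild_in_progress) return DuringMarkOrRebuild;
        if (in_young_only_phase) return Normal;
        return Mixed;
      }
    };

    int main() {
      CollectorState s;
      s.in_concurrent_start_gc = true;
      std::printf("%d\n", (int)s.yc_type());  // 1, i.e. ConcurrentStart
      return 0;
    }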
@ -683,7 +683,7 @@ public:
}
};

void G1ConcurrentMark::pre_initial_mark() {
void G1ConcurrentMark::pre_concurrent_start() {
assert_at_safepoint_on_vm_thread();

// Reset marking state.
@ -697,7 +697,7 @@ void G1ConcurrentMark::pre_initial_mark() {
}


void G1ConcurrentMark::post_initial_mark() {
void G1ConcurrentMark::post_concurrent_start() {
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = _g1h->ref_processor_cm();
// enable ("weak") refs discovery
@ -714,7 +714,7 @@ void G1ConcurrentMark::post_initial_mark() {

// update_g1_committed() will be called at the end of an evac pause
// when marking is on. So, it's also called at the end of the
// initial-mark pause to update the heap end, if the heap expands
// concurrent start pause to update the heap end, if the heap expands
// during it. No need to call it here.
}

@ -2411,7 +2411,7 @@ bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry

(1) Marking Bitmap. If there are gray objects that appear only
on the bitmap (this happens either when dealing with an overflow
or when the initial marking phase has simply marked the roots
or when the concurrent start pause has simply marked the roots
and didn't push them on the stack), then tasks claim heap
regions whose bitmap they then scan to find gray objects. A
global finger indicates where the end of the last claimed region

@ -220,8 +220,8 @@ private:
// roots wrt to the marking. They must be scanned before marking to maintain the
// SATB invariant.
// Typically they contain the areas from nTAMS to top of the regions.
// We could scan and mark through these objects during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We could scan and mark through these objects during the concurrent start pause,
// but for pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
@ -384,7 +384,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
void clear_statistics(HeapRegion* r);

// Resets the global marking data structures, as well as the
// task local ones; should be called during initial mark.
// task local ones; should be called during concurrent start.
void reset();

// Resets all the marking data structures. Called when we have to restart
@ -435,7 +435,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {

// Returns the task with the given id
G1CMTask* task(uint id) {
// During initial mark we use the parallel gc threads to do some work, so
// During concurrent start we use the parallel gc threads to do some work, so
// we can only compare against _max_num_tasks.
assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
return _tasks[id];
@ -541,9 +541,9 @@ public:
void clear_prev_bitmap(WorkGang* workers);

// These two methods do the work that needs to be done at the start and end of the
// initial mark pause.
void pre_initial_mark();
void post_initial_mark();
// concurrent start pause.
void pre_concurrent_start();
void post_concurrent_start();

// Scan all the root regions and mark everything reachable from
// them.

@ -76,7 +76,7 @@ class G1ConcurrentMarkThread: public ConcurrentGCThread {
bool in_progress() { return _state == InProgress; }

// Returns true from the moment a marking cycle is
// initiated (during the initial-mark pause when started() is set)
// initiated (during the concurrent start pause when started() is set)
// to the moment when the cycle completes (just after the next
// marking bitmap has been cleared and in_progress() is
// cleared). While during_cycle() is true we will not start another cycle
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,33 +22,33 @@
 *
 */

#ifndef SHARE_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP
#define SHARE_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP
#ifndef SHARE_GC_G1_G1CONCURRENTSTARTTOMIXEDTIMETRACKER_HPP
#define SHARE_GC_G1_G1CONCURRENTSTARTTOMIXEDTIMETRACKER_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Used to track time from the end of initial mark to the first mixed GC.
// After calling the initial mark/mixed gc notifications, the result can be
// Used to track time from the end of concurrent start to the first mixed GC.
// After calling the concurrent start/mixed gc notifications, the result can be
// obtained in last_marking_time() once, after which the tracking resets.
// Any pauses recorded by add_pause() will be subtracted from that results.
class G1InitialMarkToMixedTimeTracker {
class G1ConcurrentStartToMixedTimeTracker {
private:
bool _active;
double _initial_mark_end_time;
double _concurrent_start_end_time;
double _mixed_start_time;
double _total_pause_time;

double wall_time() const {
return _mixed_start_time - _initial_mark_end_time;
return _mixed_start_time - _concurrent_start_end_time;
}
public:
G1InitialMarkToMixedTimeTracker() { reset(); }
G1ConcurrentStartToMixedTimeTracker() { reset(); }

// Record initial mark pause end, starting the time tracking.
void record_initial_mark_end(double end_time) {
assert(!_active, "Initial mark out of order.");
_initial_mark_end_time = end_time;
// Record concurrent start pause end, starting the time tracking.
void record_concurrent_start_end(double end_time) {
assert(!_active, "Concurrent start out of order.");
_concurrent_start_end_time = end_time;
_active = true;
}

@ -62,7 +62,7 @@ public:

double last_marking_time() {
assert(has_result(), "Do not have all measurements yet.");
double result = (_mixed_start_time - _initial_mark_end_time) - _total_pause_time;
double result = (_mixed_start_time - _concurrent_start_end_time) - _total_pause_time;
reset();
return result;
}
@ -70,7 +70,7 @@ public:
void reset() {
_active = false;
_total_pause_time = 0.0;
_initial_mark_end_time = -1.0;
_concurrent_start_end_time = -1.0;
_mixed_start_time = -1.0;
}

@ -81,7 +81,7 @@ public:
}

// Returns whether we have a result that can be retrieved.
bool has_result() const { return _mixed_start_time > 0.0 && _initial_mark_end_time > 0.0; }
bool has_result() const { return _mixed_start_time > 0.0 && _concurrent_start_end_time > 0.0; }
};

#endif // SHARE_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP
#endif // SHARE_GC_G1_G1CONCURRENTSTARTTOMIXEDTIMETRACKER_HPP
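Usage of the renamed tracker follows the protocol in the comments above: record the end of the concurrent start pause, subtract any pauses recorded with add_pause(), and read the result once via last_marking_time(). A standalone reimplementation with the same arithmetic; record_mixed_gc_start() is an assumed name for the mixed-GC notification, which this diff does not show:

    #include <cassert>
    #include <cstdio>

    class ConcurrentStartToMixedTimeTracker {
      bool _active = false;
      double _concurrent_start_end_time = -1.0;
      double _mixed_start_time = -1.0;
      double _total_pause_time = 0.0;
     public:
      void record_concurrent_start_end(double t) {
        assert(!_active); _concurrent_start_end_time = t; _active = true;
      }
      void record_mixed_gc_start(double t) {      // assumed notification name
        if (_active) { _mixed_start_time = t; _active = false; }
      }
      void add_pause(double d) { if (_active) _total_pause_time += d; }
      bool has_result() const { return _mixed_start_time > 0.0 && _concurrent_start_end_time > 0.0; }
      double last_marking_time() {
        assert(has_result());
        double result = (_mixed_start_time - _concurrent_start_end_time) - _total_pause_time;
        *this = ConcurrentStartToMixedTimeTracker();  // reset for the next cycle
        return result;
      }
    };

    int main() {
      ConcurrentStartToMixedTimeTracker tracker;
      tracker.record_concurrent_start_end(10.0);  // concurrent start pause ends at t=10s
      tracker.add_pause(0.5);                     // a young pause during marking
      tracker.record_mixed_gc_start(18.0);        // first mixed GC at t=18s
      std::printf("%.1f\n", tracker.last_marking_time());  // (18-10)-0.5 = 7.5
      return 0;
    }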
@ -79,21 +79,21 @@ class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
HeapRegion* _hr;
size_t _marked_bytes;
UpdateLogBuffersDeferred* _log_buffer_cl;
bool _during_initial_mark;
bool _during_concurrent_start;
uint _worker_id;
HeapWord* _last_forwarded_object_end;

public:
RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
UpdateLogBuffersDeferred* log_buffer_cl,
bool during_initial_mark,
bool during_concurrent_start,
uint worker_id) :
_g1h(G1CollectedHeap::heap()),
_cm(_g1h->concurrent_mark()),
_hr(hr),
_marked_bytes(0),
_log_buffer_cl(log_buffer_cl),
_during_initial_mark(during_initial_mark),
_during_concurrent_start(during_concurrent_start),
_worker_id(worker_id),
_last_forwarded_object_end(hr->bottom()) { }

@ -119,14 +119,14 @@ public:
if (!_cm->is_marked_in_prev_bitmap(obj)) {
_cm->mark_in_prev_bitmap(obj);
}
if (_during_initial_mark) {
if (_during_concurrent_start) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
// initial-mark (since, normally, we only mark objects pointed
// concurrent start (since, normally, we only mark objects pointed
// to by roots if we succeed in copying them). By marking all
// self-forwarded objects we ensure that we mark any that are
// still pointed to be roots. During concurrent marking, and
// after initial-mark, we don't need to mark any objects
// after concurrent start, we don't need to mark any objects
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
@ -211,10 +211,10 @@ public:
}

size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
bool during_initial_mark) {
bool during_concurrent_start) {
RemoveSelfForwardPtrObjClosure rspc(hr,
&_log_buffer_cl,
during_initial_mark,
during_concurrent_start,
_worker_id);
hr->object_iterate(&rspc);
// Need to zap the remainder area of the processed region.
@ -230,16 +230,16 @@ public:
if (hr->evacuation_failed()) {
hr->clear_index_in_opt_cset();

bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();
bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();

hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
hr->note_self_forwarding_removal_start(during_concurrent_start,
during_concurrent_mark);
_g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);

hr->reset_bot();

size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark);
size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);

hr->rem_set()->clean_strong_code_roots(hr);
hr->rem_set()->clear_locked(true);
@ -60,9 +60,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
_gc_par_phases[UniverseRoots] = new WorkerDataArray<double>("UniverseRoots", "Universe Roots (ms):", max_gc_threads);
_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizerRoots", "ObjectSynchronizer Roots (ms):", max_gc_threads);
_gc_par_phases[ManagementRoots] = new WorkerDataArray<double>("ManagementRoots", "Management Roots (ms):", max_gc_threads);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>("JVMTIRoots", "JVMTI Roots (ms):", max_gc_threads);
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOTCodeRoots", "AOT Root Scan (ms):", max_gc_threads);)
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CMRefRoots", "CM RefProcessor Roots (ms):", max_gc_threads);


@ -50,9 +50,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
ThreadRoots,
UniverseRoots,
ObjectSynchronizerRoots,
ManagementRoots,
CLDGRoots,
JVMTIRoots,
AOT_ONLY(AOTCodeRoots COMMA)
CMRefRoots,
// For every OopStorage there will be one element in the enum, starting with

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -82,7 +82,7 @@ static void log_expansion(double short_term_pause_time_ratio,
resize_bytes);
}

size_t G1HeapSizingPolicy::expansion_amount() {
size_t G1HeapSizingPolicy::young_collection_expansion_amount() {
assert(GCTimeRatio > 0, "must be");

double long_term_pause_time_ratio = _analytics->long_term_pause_time_ratio();
@ -195,3 +195,74 @@ size_t G1HeapSizingPolicy::expansion_amount() {

return expand_bytes;
}

static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) {
const double desired_free_percentage = (double) free_ratio / 100.0;
const double desired_used_percentage = 1.0 - desired_free_percentage;

// We have to be careful here as these two calculations can overflow
// 32-bit size_t's.
double used_bytes_d = (double) used_bytes;
double desired_capacity_d = used_bytes_d / desired_used_percentage;
// Let's make sure that they are both under the max heap size, which
// by default will make it fit into a size_t.
double desired_capacity_upper_bound = (double) MaxHeapSize;
desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound);
// We can now safely turn it into size_t's.
return (size_t) desired_capacity_d;
}

size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) {
// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
const size_t capacity_after_gc = _g1h->capacity();
const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();

size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio);
size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);

// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert(minimum_desired_capacity <= maximum_desired_capacity,
"minimum_desired_capacity = " SIZE_FORMAT ", "
"maximum_desired_capacity = " SIZE_FORMAT,
minimum_desired_capacity, maximum_desired_capacity);

// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);

// Don't expand unless it's significant; prefer expansion to shrinking.
if (capacity_after_gc < minimum_desired_capacity) {
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;

log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
"Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
"min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
capacity_after_gc, used_after_gc, _g1h->used(), minimum_desired_capacity, MinHeapFreeRatio);

expand = true;
return expand_bytes;
// No expansion, now see if we want to shrink
} else if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;

log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
"Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
"maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
capacity_after_gc, used_after_gc, _g1h->used(), maximum_desired_capacity, MaxHeapFreeRatio);

expand = false;
return shrink_bytes;
}

expand = true; // Does not matter.
return 0;
}
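target_heap_capacity() above isolates the free-ratio math that resize_heap_if_necessary() used to inline: a free ratio of 40% means used bytes may occupy at most 60% of capacity, so the target is used / 0.60. A standalone check of that arithmetic with assumed values (900 MiB used, ratios 40 and 70, 4 GiB max):

    #include <algorithm>
    #include <cstdio>

    static double target_heap_capacity(double used_bytes, unsigned free_ratio, double max_heap) {
      double desired_used_percentage = 1.0 - (double)free_ratio / 100.0;
      double desired = used_bytes / desired_used_percentage;
      return std::min(desired, max_heap);  // clamp under the max heap size
    }

    int main() {
      const double MiB = 1024.0 * 1024.0;
      double min_target = target_heap_capacity(900 * MiB, 40, 4096 * MiB);  // 1500 MiB
      double max_target = target_heap_capacity(900 * MiB, 70, 4096 * MiB);  // 3000 MiB
      std::printf("min=%.0f MiB max=%.0f MiB\n", min_target / MiB, max_target / MiB);
      return 0;
    }

With a post-GC capacity of, say, 1024 MiB, the heap sits below the 1500 MiB minimum target, so full_collection_resize_amount() would report an expansion of 476 MiB.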
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -54,8 +54,11 @@ public:

// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
size_t expansion_amount();
size_t young_collection_expansion_amount();

// Returns the amount of bytes to resize the heap; if expand is set, the heap
// should by expanded by that amount, shrunk otherwise.
size_t full_collection_resize_amount(bool& expand);
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();


@ -51,7 +51,7 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// occupancy will be updated at the first heap expansion.
G1IHOPControl(double initial_ihop_percent);

// Most recent time from the end of the initial mark to the start of the first
// Most recent time from the end of the concurrent start to the start of the first
// mixed gc.
virtual double last_marking_length_s() const = 0;
public:
@ -71,7 +71,7 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// difference between old gen size and total heap size at the start of reclamation,
// and space required for that reclamation.
virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
// Update the time spent in the mutator beginning from the end of initial mark to
// Update the time spent in the mutator beginning from the end of concurrent start to
// the first mixed gc.
virtual void update_marking_length(double marking_length_s) = 0;

@ -82,7 +82,7 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// The returned concurrent mark starting occupancy threshold is a fixed value
// relative to the maximum heap size.
class G1StaticIHOPControl : public G1IHOPControl {
// Most recent mutator time between the end of initial mark to the start of the
// Most recent mutator time between the end of concurrent mark to the start of the
// first mixed gc.
double _last_marking_length_s;
protected:
@ -104,7 +104,7 @@ class G1StaticIHOPControl : public G1IHOPControl {
// This algorithm tries to return a concurrent mark starting occupancy value that
// makes sure that during marking the given target occupancy is never exceeded,
// based on predictions of current allocation rate and time periods between
// initial mark and the first mixed gc.
// concurrent start and the first mixed gc.
class G1AdaptiveIHOPControl : public G1IHOPControl {
size_t _heap_reserve_percent; // Percentage of maximum heap capacity we should avoid to touch
size_t _heap_waste_percent; // Percentage of free heap that should be considered as waste.

@ -251,7 +251,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
}

// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause then attempt to mark the object.
// closure during a concurrent start pause then attempt to mark the object.
if (do_mark_object == G1MarkFromRoot) {
mark_object(obj);
}

@ -74,10 +74,11 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_rs_length_prediction(0),
_pending_cards_at_gc_start(0),
_old_gen_alloc_tracker(),
_initial_mark_to_mixed(),
_concurrent_start_to_mixed(),
_collection_set(NULL),
_g1h(NULL),
_phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
_phase_times_timer(gc_timer),
_phase_times(NULL),
_mark_remark_start_sec(0),
_mark_cleanup_start_sec(0),
_tenuring_threshold(MaxTenuringThreshold),
@ -401,6 +402,15 @@ double G1Policy::predict_survivor_regions_evac_time() const {
return survivor_regions_evac_time;
}

G1GCPhaseTimes* G1Policy::phase_times() const {
// Lazy allocation because it must follow initialization of all the
// OopStorage objects by various other subsystems.
if (_phase_times == NULL) {
_phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads);
}
return _phase_times;
}
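phase_times() above defers constructing G1GCPhaseTimes until first use because, per its comment, it must follow initialization of the OopStorage objects by other subsystems; the constructor is replaced with storing the timer and a NULL pointer. A minimal standalone sketch of that lazy accessor (single-threaded initialization assumed, as during VM bootstrap; a mutable member makes the const accessor possible):

    #include <cstdio>

    struct PhaseTimes {
      PhaseTimes() { std::printf("constructed on first use\n"); }
    };

    class Policy {
      mutable PhaseTimes* _phase_times = nullptr;  // allocated lazily
     public:
      PhaseTimes* phase_times() const {
        if (_phase_times == nullptr) {
          _phase_times = new PhaseTimes();  // safe only in a single-threaded window
        }
        return _phase_times;
      }
      ~Policy() { delete _phase_times; }
    };

    int main() {
      Policy policy;
      policy.phase_times();  // first call constructs
      policy.phase_times();  // later calls reuse the instance
      return 0;
    }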
void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
|
||||
guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
|
||||
|
||||
@ -448,7 +458,7 @@ void G1Policy::record_full_collection_end() {
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_in_concurrent_start_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);
  collector_state()->set_clearing_next_bitmap(false);

@ -544,7 +554,7 @@ void G1Policy::record_collection_pause_start(double start_time_sec) {

void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_in_concurrent_start_gc(false);
}

void G1Policy::record_concurrent_mark_remark_start() {
@ -632,17 +642,15 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {

  double end_time_sec = os::elapsedTime();

  bool this_pause_included_initial_mark = false;
  bool this_pause_was_young_only = collector_state()->in_young_only_phase();
  PauseKind this_pause = young_gc_pause_kind();

  bool update_stats = !_g1h->evacuation_failed();

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
  record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
  if (this_pause_included_initial_mark) {
  if (is_concurrent_start_pause(this_pause)) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
@ -673,14 +681,15 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
  }

  if (collector_state()->in_young_gc_before_mixed()) {
    assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
  if (is_last_young_pause(this_pause)) {
    assert(!is_concurrent_start_pause(this_pause),
           "The young GC before mixed is not allowed to be concurrent start GC");
    // This has been the young GC before we start doing mixed GCs. We already
    // decided to start mixed GCs much earlier, so there is nothing to do except
    // advancing the state.
    collector_state()->set_in_young_only_phase(false);
    collector_state()->set_in_young_gc_before_mixed(false);
  } else if (!this_pause_was_young_only) {
  } else if (is_mixed_pause(this_pause)) {
    // This is a mixed GC. Here we decide whether to continue doing more
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
@ -690,6 +699,8 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
      clear_collection_set_candidates();
      maybe_start_marking();
    }
  } else {
    assert(is_young_only_pause(this_pause), "must be");
  }

  _eden_surv_rate_group->start_adding_regions();
@ -713,7 +724,8 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
                                 average_time_ms(G1GCPhaseTimes::MergeHCC) +
                                 average_time_ms(G1GCPhaseTimes::MergeLB) +
                                 average_time_ms(G1GCPhaseTimes::OptMergeRS);
    _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
    _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged,
                                              is_young_only_pause(this_pause));
  }

  // Update prediction for card scan
@ -724,7 +736,8 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
    double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
                                      average_time_ms(G1GCPhaseTimes::OptScanHR);

    _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
    _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned,
                                             is_young_only_pause(this_pause));
  }

  // Update prediction for the ratio between cards from the remembered
@ -738,7 +751,8 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
    if (total_cards_scanned > 0) {
      merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
    }
    _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);
    _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio,
                                                is_young_only_pause(this_pause));

    const size_t recorded_rs_length = _collection_set->recorded_rs_length();
    const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
@ -768,15 +782,15 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
    // these are wildly different to during young only gc and mess up young gen sizing right
    // after the mixed gc phase.
    // During mixed gc we do not use them for young gen sizing.
    if (this_pause_was_young_only) {
    if (is_young_only_pause(this_pause)) {
      _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
      _analytics->report_rs_length((double) _rs_length);
    }
  }

  assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
         "If the last pause has been an initial mark, we should not have been in the marking window");
  if (this_pause_included_initial_mark) {
  assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
         "If the last pause has been concurrent start, we should not have been in the marking window");
  if (is_concurrent_start_pause(this_pause)) {
    collector_state()->set_mark_or_rebuild_in_progress(true);
  }

@ -797,7 +811,7 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
    update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
                           _old_gen_alloc_tracker.last_cycle_old_bytes(),
                           last_unrestrained_young_length * HeapRegion::GrainBytes,
                           this_pause_was_young_only);
                           is_young_only_pause(this_pause));

    _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
  } else {
@ -807,7 +821,7 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
    // for completing the marking, i.e. are faster than expected.
    // This skews the predicted marking length towards smaller values which might cause
    // the mark start being too late.
    _initial_mark_to_mixed.reset();
    _concurrent_start_to_mixed.reset();
  }

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
@ -858,10 +872,10 @@ void G1Policy::update_ihop_prediction(double mutator_time_s,
  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
  if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
    marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           "Concurrent start to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
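Valid marking-to-mixed samples are forwarded to the IHOP control as the new marking length. How such a sample could feed a prediction is sketched below with an exponential moving average; this is an illustrative stand-in, the actual control uses HotSpot's sequence-based predictor.

    // Hypothetical predictor sketch, not the HotSpot G1Predictions machinery.
    struct MarkingLengthEma {
      double value  = 0.0;
      bool   seeded = false;

      // Blend each new marking length into the running estimate.
      void update(double sample_s, double alpha = 0.3) {
        value  = seeded ? alpha * sample_s + (1.0 - alpha) * value : sample_s;
        seeded = true;
      }
    };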
@ -1017,35 +1031,39 @@ void G1Policy::update_survivors_policy() {
                                                 _g1h->num_free_or_available_regions());
}

bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). "
                        "GC cause: %s",
                        GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    log_debug(gc, ergo)("Do not request concurrent cycle initiation "
                        "(concurrent cycle already in progress). GC cause: %s",
                        GCCause::to_string(gc_cause));
    return false;
  }
}

void G1Policy::initiate_conc_mark() {
  collector_state()->set_in_initial_mark_gc(true);
  collector_state()->set_in_concurrent_start_gc(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1Policy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.
  // We are about to decide on whether this pause will be a
  // concurrent start pause.

  // First, collector_state()->in_initial_mark_gc() should not be already set. We
  // First, collector_state()->in_concurrent_start_gc() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
  // the end of the pause (it's only set for the duration of a
  // concurrent start pause).
  assert(!collector_state()->in_concurrent_start_gc(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
@ -1059,13 +1077,13 @@ void G1Policy::decide_on_conc_mark_initiation() {
               ConcurrentGCBreakpoints::is_controlled()) {
      log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
    } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      // Initiate a new concurrent start if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
               (cause == GCCause::_wb_breakpoint)) {
      // Initiate a user requested initial mark or run_to a breakpoint.
      // An initial mark must be young only GC, so the collector state
      // Initiate a user requested concurrent start or run to a breakpoint.
      // A concurrent start must be young only GC, so the collector state
      // must be updated to reflect this.
      collector_state()->set_in_young_only_phase(true);
      collector_state()->set_in_young_gc_before_mixed(false);
@ -1145,20 +1163,42 @@ void G1Policy::maybe_start_marking() {
  }
}

bool G1Policy::is_young_only_pause(PauseKind kind) {
  assert(kind != FullGC, "must be");
  assert(kind != Remark, "must be");
  assert(kind != Cleanup, "must be");
  return kind == ConcurrentStartGC || kind == LastYoungGC || kind == YoungOnlyGC;
}

bool G1Policy::is_mixed_pause(PauseKind kind) {
  assert(kind != FullGC, "must be");
  assert(kind != Remark, "must be");
  assert(kind != Cleanup, "must be");
  return kind == MixedGC;
}

bool G1Policy::is_last_young_pause(PauseKind kind) {
  return kind == LastYoungGC;
}

bool G1Policy::is_concurrent_start_pause(PauseKind kind) {
  return kind == ConcurrentStartGC;
}

G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
  assert(!collector_state()->in_full_gc(), "must be");
  if (collector_state()->in_initial_mark_gc()) {
  if (collector_state()->in_concurrent_start_gc()) {
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return InitialMarkGC;
    return ConcurrentStartGC;
  } else if (collector_state()->in_young_gc_before_mixed()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_concurrent_start_gc(), "must be");
    return LastYoungGC;
  } else if (collector_state()->in_mixed_phase()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_concurrent_start_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return MixedGC;
  } else {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_concurrent_start_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return YoungOnlyGC;
  }
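The predicates above replace the previous per-pause booleans (this_pause_was_young_only, this_pause_included_initial_mark) with a single classification taken once per pause and queried everywhere. A condensed sketch of that pattern, with enum values mirroring the diff:

    // Pattern sketch only; the real enum and predicates live in G1Policy.
    enum PauseKind { YoungOnlyGC, MixedGC, LastYoungGC, ConcurrentStartGC, Cleanup, Remark, FullGC };

    static bool is_young_only_pause(PauseKind k) {
      return k == ConcurrentStartGC || k == LastYoungGC || k == YoungOnlyGC;
    }

    void on_pause_end(PauseKind this_pause) {
      if (is_young_only_pause(this_pause)) {
        // report young-only statistics based on the one classification
        // taken at the start of pause-end processing, so all consumers agree
      }
    }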
@ -1169,7 +1209,7 @@ void G1Policy::record_pause(PauseKind kind, double start, double end) {
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  // Manage the mutator time tracking from concurrent start to first mixed gc.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
@ -1178,15 +1218,15 @@ void G1Policy::record_pause(PauseKind kind, double start, double end) {
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      _concurrent_start_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
    case ConcurrentStartGC:
      if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
        _initial_mark_to_mixed.record_initial_mark_end(end);
        _concurrent_start_to_mixed.record_concurrent_start_end(end);
      }
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      _concurrent_start_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
@ -1194,7 +1234,7 @@ void G1Policy::record_pause(PauseKind kind, double start, double end) {
}

void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
  _concurrent_start_to_mixed.reset();
}

bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,

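From the calls visible in record_pause(), the renamed tracker measures mutator time from the end of the concurrent start pause to the first mixed GC, subtracting intervening STW pauses. An illustrative sketch follows; the field names and bookkeeping details are assumptions, not the contents of g1ConcurrentStartToMixedTimeTracker.hpp:

    // Sketch of a concurrent-start-to-mixed tracker; API names follow the diff.
    class ConcStartToMixedTrackerSketch {
      double _concurrent_start_end_s = -1.0; // -1 means not currently tracking
      double _paused_s               = 0.0;  // STW time to subtract from the interval
      double _last_marking_s         = -1.0;
    public:
      void record_concurrent_start_end(double t_s) {
        _concurrent_start_end_s = t_s;
        _paused_s = 0.0;
      }
      void add_pause(double len_s) {
        if (_concurrent_start_end_s >= 0.0) _paused_s += len_s;
      }
      void record_mixed_gc_start(double t_s) {
        if (_concurrent_start_end_s >= 0.0) {
          // Keep only the mutator portion of the interval.
          _last_marking_s = (t_s - _concurrent_start_end_s) - _paused_s;
          _concurrent_start_end_s = -1.0;
        }
      }
      bool   has_result() const { return _last_marking_s >= 0.0; }
      double last_marking_time() const { return _last_marking_s; }
      void   reset() { _concurrent_start_end_s = -1.0; _paused_s = 0.0; }
    };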
@ -26,9 +26,9 @@
#define SHARE_GC_G1_G1POLICY_HPP

#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1OldGenAllocationTracker.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
@ -107,7 +107,7 @@ class G1Policy: public CHeapObj<mtGC> {
  // two GCs.
  G1OldGenAllocationTracker _old_gen_alloc_tracker;

  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
  G1ConcurrentStartToMixedTimeTracker _concurrent_start_to_mixed;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
@ -182,7 +182,9 @@ private:
  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1h;

  G1GCPhaseTimes* _phase_times;
  STWGCTimer* _phase_times_timer;
  // Lazily initialized
  mutable G1GCPhaseTimes* _phase_times;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
@ -271,11 +273,15 @@ private:
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    ConcurrentStartGC,
    Cleanup,
    Remark
  };

  static bool is_young_only_pause(PauseKind kind);
  static bool is_mixed_pause(PauseKind kind);
  static bool is_last_young_pause(PauseKind kind);
  static bool is_concurrent_start_pause(PauseKind kind);
  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
@ -296,7 +302,7 @@ public:

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }
  G1GCPhaseTimes* phase_times() const;

  // Check the current value of the young list RSet length and
  // compare it against the last prediction. If the current value is
@ -364,14 +370,14 @@ public:
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
  bool force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set in_initial_mark_gc() so that the pause does
  // the initial-mark work and start a marking cycle.
  // it will set in_concurrent_start_gc() so that the pause does
  // the concurrent start work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  size_t young_list_target_length() const { return _young_list_target_length; }

@ -47,17 +47,17 @@ public:
  CodeBlobClosure* weak_codeblobs() { return &_closures._codeblobs; }
};

// Closures used during initial mark.
// Closures used during concurrent start.
// The treatment of "weak" roots is selectable through the template parameter,
// this is usually used to control unloading of classes and interned strings.
template <G1Mark MarkWeak>
class G1InitialMarkClosures : public G1EvacuationRootClosures {
class G1ConcurrentStartMarkClosures : public G1EvacuationRootClosures {
  G1SharedClosures<G1MarkFromRoot> _strong;
  G1SharedClosures<MarkWeak> _weak;

public:
  G1InitialMarkClosures(G1CollectedHeap* g1h,
                        G1ParScanThreadState* pss) :
  G1ConcurrentStartMarkClosures(G1CollectedHeap* g1h,
                                G1ParScanThreadState* pss) :
    _strong(g1h, pss, /* process_only_dirty_klasses */ false),
    _weak(g1h, pss, /* process_only_dirty_klasses */ false) {}

@ -73,11 +73,11 @@ public:

G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
  G1EvacuationRootClosures* res = NULL;
  if (g1h->collector_state()->in_initial_mark_gc()) {
  if (g1h->collector_state()->in_concurrent_start_gc()) {
    if (ClassUnloadingWithConcurrentMark) {
      res = new G1InitialMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
      res = new G1ConcurrentStartMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
    } else {
      res = new G1InitialMarkClosures<G1MarkFromRoot>(g1h, pss);
      res = new G1ConcurrentStartMarkClosures<G1MarkFromRoot>(g1h, pss);
    }
  } else {
    res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->in_young_only_phase());

@ -45,7 +45,6 @@
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutex.hpp"
#include "services/management.hpp"
#include "utilities/macros.hpp"

G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
@ -196,20 +195,6 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
      Management::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
      JvmtiExport::oops_do(strong_roots);
    }
  }

#if INCLUDE_AOT
  if (UseAOT) {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);

@ -34,7 +34,7 @@ template <G1Mark Mark>
class G1SharedClosures {
  static bool needs_strong_processing() {
    // Request strong code root processing when G1MarkFromRoot is passed in during
    // initial mark.
    // concurrent start.
    return Mark == G1MarkFromRoot;
  }
public:

@ -57,11 +57,11 @@ VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
bool VM_G1TryInitiateConcMark::doit_prologue() {
  bool result = VM_GC_Operation::doit_prologue();
  // The prologue can fail for a couple of reasons. The first is that another GC
  // got scheduled and prevented the scheduling of the initial mark GC. The
  // got scheduled and prevented the scheduling of the concurrent start GC. The
  // second is that the GC locker may be active and the heap can't be expanded.
  // In both cases we want to retry the GC so that the initial mark pause is
  // In both cases we want to retry the GC so that the concurrent start pause is
  // actually scheduled. In the second case, however, we should stall
  // until the GC locker is no longer active and then retry the initial mark GC.
  // until the GC locker is no longer active and then retry the concurrent start GC.
  if (!result) _transient_failure = true;
  return result;
}
@ -80,15 +80,15 @@ void VM_G1TryInitiateConcMark::doit() {
  // a young-only or mixed GC (depending on phase). For a user request
  // there's no point in even doing that much, so done. For some non-user
  // requests the alternative GC might still be needed.
  } else if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) {
    // Failure to force the next GC pause to be an initial mark indicates
  } else if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) {
    // Failure to force the next GC pause to be a concurrent start indicates
    // there is already a concurrent marking cycle in progress. Set flag
    // to notify the caller and return immediately.
    _cycle_already_in_progress = true;
  } else if ((_gc_cause != GCCause::_wb_breakpoint) &&
             ConcurrentGCBreakpoints::is_controlled()) {
    // WhiteBox wants to be in control of concurrent cycles, so don't try to
    // start one. This check is after the force_initial_mark_xxx so that a
    // start one. This check is after the force_concurrent_start_xxx so that a
    // request will be remembered for a later partial collection, even though
    // we've rejected this request.
    _whitebox_attached = true;

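The prologue comment describes a retry protocol: a transient failure (lost race with another GC, or an active GC locker) should make the caller retry, while an already-running cycle should not. A standalone sketch of that caller-side loop, with stand-in types rather than the HotSpot VM operation classes:

    // Hypothetical stand-in for VM_G1TryInitiateConcMark; names are assumptions.
    struct TryInitiateOpSketch {
      bool transient_failure = false;        // prologue failed: raced or GC locker active
      bool cycle_already_in_progress = false;
      bool execute() {                       // true if a concurrent start was scheduled
        // ... decide as VM_G1TryInitiateConcMark::doit() does ...
        return !transient_failure && !cycle_already_in_progress;
      }
    };

    bool try_start_concurrent_cycle() {
      for (;;) {
        TryInitiateOpSketch op;
        if (op.execute()) return true;                  // concurrent start pause scheduled
        if (op.cycle_already_in_progress) return false; // a cycle is already running
        // transient failure: stall until the GC locker clears, then retry
      }
    }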
Some files were not shown because too many files have changed in this diff